2024-02-01 09:13:28

by Liu Shixin

Subject: [PATCH 0/2] Fix high I/O when memory is almost at the memcg limit

Recently, when installing a package in a docker environment where memory
usage was close to the memcg limit, the program did not respond for more
than 15 minutes. During this period, I/O was high (~1G/s), which caused
other programs to fail to work properly.

The problem can be reproduced in the following way:

1. Download the image:
docker pull centos:7
2. Create a container with a 4G memory limit and a 6G memsw limit (cgroup v1):
docker create --name dockerhub_centos7 --cpu-period=100000
--cpu-quota=400000 --memory 4G --memory-swap 6G --cap-add=SYS_PTRACE
--cap-add=SYS_ADMIN --cap-add=NET_ADMIN --cap-add=NET_RAW
--pids-limit=20000 --ulimit nofile=1048576:1048576
--ulimit memlock=-1:-1 dockerhub_centos7:latest /usr/sbin/init
3. Start the container:
docker start dockerhub_centos7
4. Allocate 6094MB of memory inside the container (a minimal allocator
sketch is shown after this list).
5. Run 'yum install expect'.
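
A minimal allocator along the following lines can be used for step 4 (an
illustrative sketch only; the file name, chunk size and command line are
made up and this is not the exact test program used):

/*
 * alloc_mem.c - hypothetical helper for step 4: allocate and touch ~6094MB
 * so the container sits right at its memcg/memsw limit (assumes 64-bit).
 * Build: gcc -O2 -o alloc_mem alloc_mem.c
 * Run inside the container: ./alloc_mem 6094
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	size_t mb = (argc > 1) ? strtoul(argv[1], NULL, 10) : 6094;
	size_t chunk = 64UL << 20;	/* allocate in 64MB chunks */
	size_t done;

	for (done = 0; done < (mb << 20); done += chunk) {
		char *p = malloc(chunk);

		if (!p) {
			fprintf(stderr, "allocation failed after %zu MB\n",
				done >> 20);
			break;
		}
		/* touch every page so it is actually charged to the memcg */
		memset(p, 1, chunk);
	}
	pause();	/* keep the memory pinned until killed */
	return 0;
}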

We found that this problem is caused by a lot of meaningless readahead.
Since memory usage is almost at the memcg limit, the readahead pages are
reclaimed immediately, so readahead and reclaim happen again and again.

These two patches stop readahead early when the memcg charge fails and
skip readahead when there are too many active refaults.


Liu Shixin (2):
mm/readahead: stop readahead loop if memcg charge fails
mm/readahead: limit sync readahead while too many active refault

include/linux/fs.h | 2 ++
include/linux/pagemap.h | 1 +
mm/filemap.c | 16 ++++++++++++++++
mm/readahead.c | 12 ++++++++++--
4 files changed, 29 insertions(+), 2 deletions(-)

--
2.25.1



2024-02-01 09:14:05

by Liu Shixin

Subject: [PATCH 1/2] mm/readahead: stop readahead loop if memcg charge fails

When a task in a memcg readaheads file pages, page_cache_ra_unbounded()
will try to readahead nr_to_read pages. Even if a newly allocated page
fails to charge, page_cache_ra_unbounded() still tries to readahead the
next page. This leads to too much memory reclaim.

Stop readahead if mem_cgroup_charge() fails, i.e. when
add_to_page_cache_lru() returns -ENOMEM.

Signed-off-by: Liu Shixin <[email protected]>
Signed-off-by: Jinjiang Tu <[email protected]>
---
mm/readahead.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index 23620c57c1225..cc4abb67eb223 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -228,6 +228,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
*/
for (i = 0; i < nr_to_read; i++) {
struct folio *folio = xa_load(&mapping->i_pages, index + i);
+ int ret;

if (folio && !xa_is_value(folio)) {
/*
@@ -247,9 +248,12 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
folio = filemap_alloc_folio(gfp_mask, 0);
if (!folio)
break;
- if (filemap_add_folio(mapping, folio, index + i,
- gfp_mask) < 0) {
+
+ ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
+ if (ret < 0) {
folio_put(folio);
+ if (ret == -ENOMEM)
+ break;
read_pages(ractl);
ractl->_index++;
i = ractl->_index + ractl->_nr_pages - index - 1;
--
2.25.1


2024-02-01 09:15:57

by Liu Shixin

Subject: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault

When the page fault is not for write and the refault distance is close,
the page will be activated directly. If there are too many such pages in
a file, it means the pages may be reclaimed immediately. In such a
situation, read-ahead has no positive effect since it will only waste IO.
So count the number of such pages, and when the number is too large, stop
bothering with read-ahead for a while until it decreases automatically.

Define 'too large' as 10000 empirically, which solves the problem and is
not affected by the occasional active refault.

Signed-off-by: Liu Shixin <[email protected]>
---
include/linux/fs.h | 2 ++
include/linux/pagemap.h | 1 +
mm/filemap.c | 16 ++++++++++++++++
mm/readahead.c | 4 ++++
4 files changed, 23 insertions(+)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index ed5966a704951..f2a1825442f5a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -960,6 +960,7 @@ struct fown_struct {
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
* @mmap_miss: How many mmap accesses missed in the page cache.
+ * @active_refault: Number of active page refault.
* @prev_pos: The last byte in the most recent read request.
*
* When this structure is passed to ->readahead(), the "most recent"
@@ -971,6 +972,7 @@ struct file_ra_state {
unsigned int async_size;
unsigned int ra_pages;
unsigned int mmap_miss;
+ unsigned int active_refault;
loff_t prev_pos;
};

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2df35e65557d2..da9eaf985dec4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1256,6 +1256,7 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ unsigned int _active_refault;
bool _workingset;
unsigned long _pflags;
};
diff --git a/mm/filemap.c b/mm/filemap.c
index 750e779c23db7..4de80592ab270 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3037,6 +3037,7 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,

#ifdef CONFIG_MMU
#define MMAP_LOTSAMISS (100)
+#define ACTIVE_REFAULT_LIMIT (10000)
/*
* lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
* @vmf - the vm_fault for this fault.
@@ -3142,6 +3143,18 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
if (mmap_miss > MMAP_LOTSAMISS)
return fpin;

+ ractl._active_refault = READ_ONCE(ra->active_refault);
+ if (ractl._active_refault)
+ WRITE_ONCE(ra->active_refault, --ractl._active_refault);
+
+ /*
+ * If there are a lot of refault of active pages in this file,
+ * that means the memory reclaim is ongoing. Stop bothering with
+ * read-ahead since it will only waste IO.
+ */
+ if (ractl._active_refault >= ACTIVE_REFAULT_LIMIT)
+ return fpin;
+
/*
* mmap read-around
*/
@@ -3151,6 +3164,9 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
ra->async_size = ra->ra_pages / 4;
ractl._index = ra->start;
page_cache_ra_order(&ractl, ra, 0);
+
+ WRITE_ONCE(ra->active_refault, ractl._active_refault);
+
return fpin;
}

diff --git a/mm/readahead.c b/mm/readahead.c
index cc4abb67eb223..d79bb70a232c4 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -263,6 +263,10 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
folio_set_readahead(folio);
ractl->_workingset |= folio_test_workingset(folio);
ractl->_nr_pages++;
+ if (unlikely(folio_test_workingset(folio)))
+ ractl->_active_refault++;
+ else if (unlikely(ractl->_active_refault))
+ ractl->_active_refault--;
}

/*
--
2.25.1


2024-02-01 09:47:20

by Jan Kara

Subject: Re: [PATCH 1/2] mm/readahead: stop readahead loop if memcg charge fails

On Thu 01-02-24 18:08:34, Liu Shixin wrote:
> When a task in a memcg readaheads file pages, page_cache_ra_unbounded()
> will try to readahead nr_to_read pages. Even if a newly allocated page
> fails to charge, page_cache_ra_unbounded() still tries to readahead the
> next page. This leads to too much memory reclaim.
>
> Stop readahead if mem_cgroup_charge() fails, i.e. when
> add_to_page_cache_lru() returns -ENOMEM.
>
> Signed-off-by: Liu Shixin <[email protected]>
> Signed-off-by: Jinjiang Tu <[email protected]>

Makes sense. Feel free to add:

Reviewed-by: Jan Kara <[email protected]>

Honza

--
Jan Kara <[email protected]>
SUSE Labs, CR

2024-02-01 10:06:55

by Jan Kara

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault

On Thu 01-02-24 18:08:35, Liu Shixin wrote:
> When the page fault is not for write and the refault distance is close,
> the page will be activated directly. If there are too many such pages in
> a file, it means the pages may be reclaimed immediately. In such a
> situation, read-ahead has no positive effect since it will only waste IO.
> So count the number of such pages, and when the number is too large, stop
> bothering with read-ahead for a while until it decreases automatically.
>
> Define 'too large' as 10000 empirically, which solves the problem and is
> not affected by the occasional active refault.
>
> Signed-off-by: Liu Shixin <[email protected]>

So I'm not convinced this new logic is needed. We already have
ra->mmap_miss which gets incremented when a page fault has to read the page
(and decremented when a page fault finds the page already in cache). This
should already work to detect thrashing as well, shouldn't it? If it does
not, why?
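
Roughly, the existing accounting in do_sync_mmap_readahead() looks like this
(a simplified sketch, not the exact upstream code):

	/* every sync fault that has to read the page bumps the miss counter */
	mmap_miss = READ_ONCE(ra->mmap_miss);
	if (mmap_miss < MMAP_LOTSAMISS * 10)
		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);

	/*
	 * If we miss much more than we hit in this file, stop bothering
	 * with read-ahead - it will only hurt.
	 */
	if (mmap_miss > MMAP_LOTSAMISS)
		return fpin;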

Honza

--
Jan Kara <[email protected]>
SUSE Labs, CR

2024-02-01 10:41:49

by Liu Shixin

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault



On 2024/2/1 17:37, Jan Kara wrote:
> On Thu 01-02-24 18:08:35, Liu Shixin wrote:
>> When the page fault is not for write and the refault distance is close,
>> the page will be activated directly. If there are too many such pages in
>> a file, it means the pages may be reclaimed immediately. In such a
>> situation, read-ahead has no positive effect since it will only waste IO.
>> So count the number of such pages, and when the number is too large, stop
>> bothering with read-ahead for a while until it decreases automatically.
>>
>> Define 'too large' as 10000 empirically, which solves the problem and is
>> not affected by the occasional active refault.
>>
>> Signed-off-by: Liu Shixin <[email protected]>
> So I'm not convinced this new logic is needed. We already have
> ra->mmap_miss which gets incremented when a page fault has to read the page
> (and decremented when a page fault finds the page already in cache). This
> should already work to detect thrashing as well, shouldn't it? If it does
> not, why?
>
> Honza
ra->mmap_miss doesn't help: it is increased by only one in do_sync_mmap_readahead()
and then decreased by one for every page in filemap_map_pages(). So in this
scenario, it can't exceed MMAP_LOTSAMISS.
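
Roughly (a simplified sketch, the exact code differs between kernel versions),
the decrement side in filemap_map_pages() looks like this:

	mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
	rcu_read_lock();
	xas_for_each(&xas, folio, end_pgoff) {
		/* ... map the page ... */
		if (mmap_miss > 0)
			mmap_miss--;	/* one decrement per page found in cache */
	}
	rcu_read_unlock();
	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);

Since the sync readahead has just populated the page cache, the mapped pages
quickly pay back the single increment, so the counter never accumulates.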

Thanks,


2024-02-01 13:53:07

by Jan Kara

Subject: Re: [PATCH 1/2] mm/readahead: stop readahead loop if memcg charge fails

On Thu 01-02-24 13:47:03, Matthew Wilcox wrote:
> On Thu, Feb 01, 2024 at 06:08:34PM +0800, Liu Shixin wrote:
> > @@ -247,9 +248,12 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> > folio = filemap_alloc_folio(gfp_mask, 0);
> > if (!folio)
> > break;
> > - if (filemap_add_folio(mapping, folio, index + i,
> > - gfp_mask) < 0) {
> > +
> > + ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
> > + if (ret < 0) {
> > folio_put(folio);
> > + if (ret == -ENOMEM)
> > + break;
>
> No, that's too early. You've still got a batch of pages which were
> successfully added; you have to read them. You were only off by one
> line though ;-)

There's a read_pages() call just outside of the loop so this break is
actually fine AFAICT.
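
I.e. the structure with the patch applied is roughly (simplified sketch):

	for (i = 0; i < nr_to_read; i++) {
		/* ... allocate folio ... */
		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;	/* fall through to the final read_pages() */
			read_pages(ractl);
			ractl->_index++;
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		/* ... */
	}
	/* folios added to the cache before the break are still submitted here */
	read_pages(ractl);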

Honza
--
Jan Kara <[email protected]>
SUSE Labs, CR

2024-02-01 17:26:18

by Matthew Wilcox

Subject: Re: [PATCH 1/2] mm/readahead: stop readahead loop if memcg charge fails

On Thu, Feb 01, 2024 at 06:08:34PM +0800, Liu Shixin wrote:
> @@ -247,9 +248,12 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> folio = filemap_alloc_folio(gfp_mask, 0);
> if (!folio)
> break;
> - if (filemap_add_folio(mapping, folio, index + i,
> - gfp_mask) < 0) {
> +
> + ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
> + if (ret < 0) {
> folio_put(folio);
> + if (ret == -ENOMEM)
> + break;

No, that's too early. You've still got a batch of pages which were
successfully added; you have to read them. You were only off by one
line though ;-)

> read_pages(ractl);
> ractl->_index++;
> i = ractl->_index + ractl->_nr_pages - index - 1;
> --
> 2.25.1
>
>

2024-02-01 17:32:07

by Jan Kara

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault

On Thu 01-02-24 18:41:30, Liu Shixin wrote:
> On 2024/2/1 17:37, Jan Kara wrote:
> > On Thu 01-02-24 18:08:35, Liu Shixin wrote:
> >> When the page fault is not for write and the refault distance is close,
> >> the page will be activated directly. If there are too many such pages in
> >> a file, it means the pages may be reclaimed immediately. In such a
> >> situation, read-ahead has no positive effect since it will only waste IO.
> >> So count the number of such pages, and when the number is too large, stop
> >> bothering with read-ahead for a while until it decreases automatically.
> >>
> >> Define 'too large' as 10000 empirically, which solves the problem and is
> >> not affected by the occasional active refault.
> >>
> >> Signed-off-by: Liu Shixin <[email protected]>
> > So I'm not convinced this new logic is needed. We already have
> > ra->mmap_miss which gets incremented when a page fault has to read the page
> > (and decremented when a page fault finds the page already in cache). This
> > should already work to detect thrashing as well, shouldn't it? If it does
> > not, why?
> >
> > Honza
> ra->mmap_miss doesn't help: it is increased by only one in do_sync_mmap_readahead()
> and then decreased by one for every page in filemap_map_pages(). So in this
> scenario, it can't exceed MMAP_LOTSAMISS.

I see, OK. But that's a (longstanding) bug in how mmap_miss is handled. Can
you please test whether the attached patches fix the thrashing for you? At
least now I can see mmap_miss properly increments when we are hitting
uncached pages... Thanks!

Honza

--
Jan Kara <[email protected]>
SUSE Labs, CR


Attachments:
(No filename) (1.64 kB)
0001-mm-readahead-Improve-page-readaround-miss-detection.patch (4.50 kB)
0002-mm-readahead-Fix-readahead-miss-detection-with-FAULT.patch (1.82 kB)

2024-02-01 17:57:20

by Matthew Wilcox

Subject: Re: [PATCH 1/2] mm/readahead: stop readahead loop if memcg charge fails

On Thu, Feb 01, 2024 at 02:52:31PM +0100, Jan Kara wrote:
> On Thu 01-02-24 13:47:03, Matthew Wilcox wrote:
> > On Thu, Feb 01, 2024 at 06:08:34PM +0800, Liu Shixin wrote:
> > > @@ -247,9 +248,12 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> > > folio = filemap_alloc_folio(gfp_mask, 0);
> > > if (!folio)
> > > break;
> > > - if (filemap_add_folio(mapping, folio, index + i,
> > > - gfp_mask) < 0) {
> > > +
> > > + ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
> > > + if (ret < 0) {
> > > folio_put(folio);
> > > + if (ret == -ENOMEM)
> > > + break;
> >
> > No, that's too early. You've still got a batch of pages which were
> > successfully added; you have to read them. You were only off by one
> > line though ;-)
>
> There's a read_pages() call just outside of the loop so this break is
> actually fine AFAICT.

Oh, good point! I withdraw my criticism.

2024-02-02 01:25:38

by Liu Shixin

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault


On 2024/2/2 1:31, Jan Kara wrote:
> On Thu 01-02-24 18:41:30, Liu Shixin wrote:
>> On 2024/2/1 17:37, Jan Kara wrote:
>>> On Thu 01-02-24 18:08:35, Liu Shixin wrote:
>>>> When the page fault is not for write and the refault distance is close,
>>>> the page will be activated directly. If there are too many such pages in
>>>> a file, it means the pages may be reclaimed immediately. In such a
>>>> situation, read-ahead has no positive effect since it will only waste IO.
>>>> So count the number of such pages, and when the number is too large, stop
>>>> bothering with read-ahead for a while until it decreases automatically.
>>>>
>>>> Define 'too large' as 10000 empirically, which solves the problem and is
>>>> not affected by the occasional active refault.
>>>>
>>>> Signed-off-by: Liu Shixin <[email protected]>
>>> So I'm not convinced this new logic is needed. We already have
>>> ra->mmap_miss which gets incremented when a page fault has to read the page
>>> (and decremented when a page fault finds the page already in cache). This
>>> should already work to detect thrashing as well, shouldn't it? If it does
>>> not, why?
>>>
>>> Honza
>> ra->mmap_miss doesn't help: it is increased by only one in do_sync_mmap_readahead()
>> and then decreased by one for every page in filemap_map_pages(). So in this
>> scenario, it can't exceed MMAP_LOTSAMISS.
> I see, OK. But that's a (longstanding) bug in how mmap_miss is handled. Can
> you please test whether the attached patches fix the thrashing for you? At
> least now I can see mmap_miss properly increments when we are hitting
> uncached pages... Thanks!
>
> Honza
Thanks for the patch, I will test it.
>


2024-02-02 09:10:05

by Liu Shixin

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault



On 2024/2/2 1:31, Jan Kara wrote:
> On Thu 01-02-24 18:41:30, Liu Shixin wrote:
>> On 2024/2/1 17:37, Jan Kara wrote:
>>> On Thu 01-02-24 18:08:35, Liu Shixin wrote:
>>>> When the page fault is not for write and the refault distance is close,
>>>> the page will be activated directly. If there are too many such pages in
>>>> a file, it means the pages may be reclaimed immediately. In such a
>>>> situation, read-ahead has no positive effect since it will only waste IO.
>>>> So count the number of such pages, and when the number is too large, stop
>>>> bothering with read-ahead for a while until it decreases automatically.
>>>>
>>>> Define 'too large' as 10000 empirically, which solves the problem and is
>>>> not affected by the occasional active refault.
>>>>
>>>> Signed-off-by: Liu Shixin <[email protected]>
>>> So I'm not convinced this new logic is needed. We already have
>>> ra->mmap_miss which gets incremented when a page fault has to read the page
>>> (and decremented when a page fault finds the page already in cache). This
>>> should already work to detect thrashing as well, shouldn't it? If it does
>>> not, why?
>>>
>>> Honza
>> ra->mmap_miss doesn't help: it is increased by only one in do_sync_mmap_readahead()
>> and then decreased by one for every page in filemap_map_pages(). So in this
>> scenario, it can't exceed MMAP_LOTSAMISS.
> I see, OK. But that's a (longstanding) bug in how mmap_miss is handled. Can
> you please test whether the attached patches fix the thrashing for you? At
> least now I can see mmap_miss properly increments when we are hitting
> uncached pages... Thanks!
>
> Honza
The patch doesn't seem to have much effect. I will try to analyze why it doesn't work.
The attached file is my testcase.

Thanks,
>


Attachments:
test.sh (404.00 B)
alloc_page.c (444.00 B)

2024-02-29 09:02:11

by Liu Shixin

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault



On 2024/2/2 17:02, Liu Shixin wrote:
>
> On 2024/2/2 1:31, Jan Kara wrote:
>> On Thu 01-02-24 18:41:30, Liu Shixin wrote:
>>> On 2024/2/1 17:37, Jan Kara wrote:
>>>> On Thu 01-02-24 18:08:35, Liu Shixin wrote:
>>>>> When the page fault is not for write and the refault distance is close,
>>>>> the page will be activated directly. If there are too many such pages in
>>>>> a file, it means the pages may be reclaimed immediately. In such a
>>>>> situation, read-ahead has no positive effect since it will only waste IO.
>>>>> So count the number of such pages, and when the number is too large, stop
>>>>> bothering with read-ahead for a while until it decreases automatically.
>>>>>
>>>>> Define 'too large' as 10000 empirically, which solves the problem and is
>>>>> not affected by the occasional active refault.
>>>>>
>>>>> Signed-off-by: Liu Shixin <[email protected]>
>>>> So I'm not convinced this new logic is needed. We already have
>>>> ra->mmap_miss which gets incremented when a page fault has to read the page
>>>> (and decremented when a page fault finds the page already in cache). This
>>>> should already work to detect thrashing as well, shouldn't it? If it does
>>>> not, why?
>>>>
>>>> Honza
>>> ra->mmap_miss doesn't help: it is increased by only one in do_sync_mmap_readahead()
>>> and then decreased by one for every page in filemap_map_pages(). So in this
>>> scenario, it can't exceed MMAP_LOTSAMISS.
>> I see, OK. But that's a (longstanding) bug in how mmap_miss is handled. Can
>> you please test whether the attached patches fix the thrashing for you? At
>> least now I can see mmap_miss properly increments when we are hitting
>> uncached pages... Thanks!
>>
>> Honza
> The patch doesn't seem to have much effect. I will try to analyze why it doesn't work.
> The attached file is my testcase.
>
> Thanks,

I think I figured out why mmap_miss doesn't work. After do_sync_mmap_readahead(),
there is a __filemap_get_folio() call to make sure the page is ready. The page is
therefore already present when filemap_map_pages() runs, so mmap_miss is decreased
once; it goes back to 0 and can't stop read-ahead.
Overall, I don't think mmap_miss can solve this problem.
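
The ordering in the fault path is roughly the following (a simplified sketch
of filemap_fault(), details differ between kernel versions):

	folio = filemap_get_folio(mapping, index);
	if (IS_ERR(folio)) {
		/* not in the page cache: one mmap_miss++ via sync readahead */
		fpin = do_sync_mmap_readahead(vmf);
		/* readahead (or this call) makes sure the page is present */
		folio = __filemap_get_folio(mapping, index,
					    FGP_CREAT | FGP_FOR_MMAP,
					    vmf->gfp_mask);
	}

By the time filemap_map_pages() handles the surrounding range, the pages are
already in the page cache, so mmap_miss is decremented again and never builds
up.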



2024-03-05 07:07:58

by Liu Shixin

Subject: Re: [PATCH 2/2] mm/readahead: limit sync readahead while too many active refault

Hi Jan, all,

Please take a look at this patch again, even though it may not be the most
graceful approach.

I can't think of any other way to fix the problem except using the
workingset information.


Thanks,

On 2024/2/1 18:08, Liu Shixin wrote:
> When the page fault is not for write and the refault distance is close,
> the page will be activated directly. If there are too many such pages in
> a file, it means the pages may be reclaimed immediately. In such a
> situation, read-ahead has no positive effect since it will only waste IO.
> So count the number of such pages, and when the number is too large, stop
> bothering with read-ahead for a while until it decreases automatically.
>
> Define 'too large' as 10000 empirically, which solves the problem and is
> not affected by the occasional active refault.
>
> Signed-off-by: Liu Shixin <[email protected]>