2024-02-13 16:49:08

by Darrick J. Wong

Subject: Re: [RFC v2 07/14] readahead: allocate folios with mapping_min_order in ra_(unbounded|order)

On Tue, Feb 13, 2024 at 10:37:06AM +0100, Pankaj Raghav (Samsung) wrote:
> From: Pankaj Raghav <[email protected]>
>
> Allocate folios with an order of at least mapping_min_order in
> page_cache_ra_unbounded() and page_cache_ra_order(), as we need to
> guarantee a minimum folio order in the page cache.
>
> Signed-off-by: Pankaj Raghav <[email protected]>
> Signed-off-by: Luis Chamberlain <[email protected]>
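
(Context for readers: mapping_min_folio_order() and
mapping_min_folio_nrpages() come from earlier patches in this series.
A minimal sketch of the relationship they encode, assuming the helper
names and the usual power-of-two folio sizing, would be:

	/* Sketch only, not the series' exact helpers: a mapping's
	 * minimum folio order fixes the minimum pages per folio. */
	static inline unsigned long min_nrpages_of(unsigned int min_order)
	{
		return 1UL << min_order;	/* e.g. order 2 => 4 pages */
	}

so a mapping with min_order 2 forces every folio, and every index step
in the hunks below, to cover at least four pages.)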

Acked-by: Darrick J. Wong <[email protected]>

--D

> ---
> mm/readahead.c | 30 ++++++++++++++++++++++++++----
> 1 file changed, 26 insertions(+), 4 deletions(-)
>
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 13b62cbd3b79..a361fba18674 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -214,6 +214,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> unsigned long index = readahead_index(ractl);
> gfp_t gfp_mask = readahead_gfp_mask(mapping);
> unsigned long i = 0;
> + unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
>
> /*
> * Partway through the readahead operation, we will have added
> @@ -235,6 +236,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> struct folio *folio = xa_load(&mapping->i_pages, index + i);
>
> if (folio && !xa_is_value(folio)) {
> + long nr_pages = folio_nr_pages(folio);
> +
> /*
> * Page already present? Kick off the current batch
> * of contiguous pages before continuing with the
> @@ -244,19 +247,31 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
> * not worth getting one just for that.
> */
> read_pages(ractl);
> - ractl->_index += folio_nr_pages(folio);
> +
> + /*
> + * Move ractl->_index forward by at least
> + * min_nrpages if the folio got truncated, to
> + * respect the alignment constraint in the
> + * page cache.
> + */
> + if (mapping != folio->mapping)
> + nr_pages = min_nrpages;
> +
> + VM_BUG_ON_FOLIO(nr_pages < min_nrpages, folio);
> + ractl->_index += nr_pages;
> i = ractl->_index + ractl->_nr_pages - index;
> continue;
> }
>
> - folio = filemap_alloc_folio(gfp_mask, 0);
> + folio = filemap_alloc_folio(gfp_mask,
> + mapping_min_folio_order(mapping));
> if (!folio)
> break;
> if (filemap_add_folio(mapping, folio, index + i,
> gfp_mask) < 0) {
> folio_put(folio);
> read_pages(ractl);
> - ractl->_index++;
> + ractl->_index += min_nrpages;
> i = ractl->_index + ractl->_nr_pages - index;
> continue;
> }
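
The invariant in the hunk above is that ractl->_index only ever
advances in multiples of min_nrpages, so every index at which a folio
is added stays aligned to the mapping's minimum folio order. A
standalone model of that stepping rule (hypothetical helper, not part
of the patch):

	/* Hypothetical sketch of the index-stepping rule: a truncated
	 * folio's size can no longer be trusted, so clamp the step to
	 * min_nrpages before advancing. */
	static unsigned long step_ra_index(unsigned long index,
					   unsigned long nr_pages,
					   unsigned long min_nrpages)
	{
		if (nr_pages < min_nrpages)
			nr_pages = min_nrpages;
		/* index entered aligned to min_nrpages and stays so. */
		return index + nr_pages;
	}
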
> @@ -516,6 +531,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
> {
> struct address_space *mapping = ractl->mapping;
> pgoff_t index = readahead_index(ractl);
> + unsigned int min_order = mapping_min_folio_order(mapping);
> pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
> pgoff_t mark = index + ra->size - ra->async_size;
> int err = 0;
> @@ -542,11 +558,17 @@ void page_cache_ra_order(struct readahead_control *ractl,
> if (index & ((1UL << order) - 1))
> order = __ffs(index);
> /* Don't allocate pages past EOF */
> - while (index + (1UL << order) - 1 > limit)
> + while (order > min_order && index + (1UL << order) - 1 > limit)
> order--;
> /* THP machinery does not support order-1 */
> if (order == 1)
> order = 0;
> +
> + if (order < min_order)
> + order = min_order;
> +
> + VM_BUG_ON(index & ((1UL << order) - 1));
> +
> err = ra_alloc_folio(ractl, index, mark, order, gfp);
> if (err)
> break;
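
Collecting the order-selection logic above into one place, the rule
after this patch is, as a sketch (assuming min_order has already been
validated against MAX_PAGECACHE_ORDER by the caller):

	/* Sketch of the clamping rule above, not the function itself. */
	static unsigned int clamp_ra_order(unsigned int order,
					   unsigned int min_order,
					   pgoff_t index, pgoff_t limit)
	{
		/* Shrink past-EOF allocations, but never below min_order. */
		while (order > min_order &&
		       index + (1UL << order) - 1 > limit)
			order--;
		if (order == 1)		/* THP machinery: no order-1 */
			order = 0;
		if (order < min_order)	/* the minimum always wins */
			order = min_order;
		return order;
	}
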
> --
> 2.43.0
>