Convert the function to use folios throughout. This is in preparation
for the removal of find_get_pages_range_tag(). The function now supports
large folios.
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
fs/ext4/inode.c | 55 ++++++++++++++++++++++++-------------------------
1 file changed, 27 insertions(+), 28 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2b5ef1b64249..69a0708c8e87 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2572,8 +2572,8 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
struct address_space *mapping = mpd->inode->i_mapping;
- struct pagevec pvec;
- unsigned int nr_pages;
+ struct folio_batch fbatch;
+ unsigned int nr_folios;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
@@ -2587,18 +2587,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
-
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
- nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag);
- if (nr_pages == 0)
+ nr_folios = filemap_get_folios_tag(mapping, &index, end,
+ tag, &fbatch);
+ if (nr_folios == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
/*
* Accumulated enough dirty pages? This doesn't apply
@@ -2612,10 +2611,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
goto out;
/* If we can't merge this page, we are done. */
- if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+ if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
goto out;
- lock_page(page);
+ folio_lock(folio);
/*
* If the page is no longer dirty, or its mapping no
* longer corresponds to inode we are writing (which
@@ -2623,16 +2622,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
* page is already under writeback and we are not doing
* a data integrity writeback, skip the page
*/
- if (!PageDirty(page) ||
- (PageWriteback(page) &&
+ if (!folio_test_dirty(folio) ||
+ (folio_test_writeback(folio) &&
(mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
- unlikely(page->mapping != mapping)) {
- unlock_page(page);
+ unlikely(folio->mapping != mapping)) {
+ folio_unlock(folio);
continue;
}
- wait_on_page_writeback(page);
- BUG_ON(PageWriteback(page));
+ folio_wait_writeback(folio);
+ BUG_ON(folio_test_writeback(folio));
/*
* Should never happen but for buggy code in
@@ -2643,33 +2642,33 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
*
* [1] https://lore.kernel.org/linux-mm/[email protected]
*/
- if (!page_has_buffers(page)) {
- ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
- ClearPageDirty(page);
- unlock_page(page);
+ if (!folio_buffers(folio)) {
+ ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
+ folio_clear_dirty(folio);
+ folio_unlock(folio);
continue;
}
if (mpd->map.m_len == 0)
- mpd->first_page = page->index;
- mpd->next_page = page->index + 1;
+ mpd->first_page = folio->index;
+ mpd->next_page = folio->index + folio_nr_pages(folio);
/* Add all dirty buffers to mpd */
- lblk = ((ext4_lblk_t)page->index) <<
+ lblk = ((ext4_lblk_t)folio->index) <<
(PAGE_SHIFT - blkbits);
- head = page_buffers(page);
+ head = folio_buffers(folio);
err = mpage_process_page_bufs(mpd, head, head, lblk);
if (err <= 0)
goto out;
err = 0;
- left--;
+ left -= folio_nr_pages(folio);
}
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
mpd->scanned_until_end = 1;
return 0;
out:
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
return err;
}
--
2.36.1
On Mon, Oct 17, 2022 at 1:25 PM Vishal Moola (Oracle)
<[email protected]> wrote:
>
> Converted the function to use folios throughout. This is in preparation
> for the removal of find_get_pages_range_tag(). Now supports large
> folios.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>
> ---
> fs/ext4/inode.c | 55 ++++++++++++++++++++++++-------------------------
> 1 file changed, 27 insertions(+), 28 deletions(-)
>
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index 2b5ef1b64249..69a0708c8e87 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -2572,8 +2572,8 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
> static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> {
> struct address_space *mapping = mpd->inode->i_mapping;
> - struct pagevec pvec;
> - unsigned int nr_pages;
> + struct folio_batch fbatch;
> + unsigned int nr_folios;
> long left = mpd->wbc->nr_to_write;
> pgoff_t index = mpd->first_page;
> pgoff_t end = mpd->last_page;
> @@ -2587,18 +2587,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> tag = PAGECACHE_TAG_TOWRITE;
> else
> tag = PAGECACHE_TAG_DIRTY;
> -
> - pagevec_init(&pvec);
> + folio_batch_init(&fbatch);
> mpd->map.m_len = 0;
> mpd->next_page = index;
> while (index <= end) {
> - nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
> - tag);
> - if (nr_pages == 0)
> + nr_folios = filemap_get_folios_tag(mapping, &index, end,
> + tag, &fbatch);
> + if (nr_folios == 0)
> break;
>
> - for (i = 0; i < nr_pages; i++) {
> - struct page *page = pvec.pages[i];
> + for (i = 0; i < nr_folios; i++) {
> + struct folio *folio = fbatch.folios[i];
>
> /*
> * Accumulated enough dirty pages? This doesn't apply
> @@ -2612,10 +2611,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> goto out;
>
> /* If we can't merge this page, we are done. */
> - if (mpd->map.m_len > 0 && mpd->next_page != page->index)
> + if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
> goto out;
>
> - lock_page(page);
> + folio_lock(folio);
> /*
> * If the page is no longer dirty, or its mapping no
> * longer corresponds to inode we are writing (which
> @@ -2623,16 +2622,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> * page is already under writeback and we are not doing
> * a data integrity writeback, skip the page
> */
> - if (!PageDirty(page) ||
> - (PageWriteback(page) &&
> + if (!folio_test_dirty(folio) ||
> + (folio_test_writeback(folio) &&
> (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
> - unlikely(page->mapping != mapping)) {
> - unlock_page(page);
> + unlikely(folio->mapping != mapping)) {
> + folio_unlock(folio);
> continue;
> }
>
> - wait_on_page_writeback(page);
> - BUG_ON(PageWriteback(page));
> + folio_wait_writeback(folio);
> + BUG_ON(folio_test_writeback(folio));
>
> /*
> * Should never happen but for buggy code in
> @@ -2643,33 +2642,33 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> *
> * [1] https://lore.kernel.org/linux-mm/[email protected]
> */
> - if (!page_has_buffers(page)) {
> - ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
> - ClearPageDirty(page);
> - unlock_page(page);
> + if (!folio_buffers(folio)) {
> + ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
> + folio_clear_dirty(folio);
> + folio_unlock(folio);
> continue;
> }
>
> if (mpd->map.m_len == 0)
> - mpd->first_page = page->index;
> - mpd->next_page = page->index + 1;
> + mpd->first_page = folio->index;
> + mpd->next_page = folio->index + folio_nr_pages(folio);
> /* Add all dirty buffers to mpd */
> - lblk = ((ext4_lblk_t)page->index) <<
> + lblk = ((ext4_lblk_t)folio->index) <<
> (PAGE_SHIFT - blkbits);
> - head = page_buffers(page);
> + head = folio_buffers(folio);
> err = mpage_process_page_bufs(mpd, head, head, lblk);
> if (err <= 0)
> goto out;
> err = 0;
> - left--;
> + left -= folio_nr_pages(folio);
> }
> - pagevec_release(&pvec);
> + folio_batch_release(&fbatch);
> cond_resched();
> }
> mpd->scanned_until_end = 1;
> return 0;
> out:
> - pagevec_release(&pvec);
> + folio_batch_release(&fbatch);
> return err;
> }
>
> --
> 2.36.1
>
Does anyone have some time to look over this ext4 patch this week?
Feedback is appreciated.