Converts mext_page_double_lock() to use folios. This change saves
146 bytes of kernel text and removes 3 calls to compound_head().
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
fs/ext4/move_extent.c | 46 +++++++++++++++++++++----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 8dbb87edf24c..2de9829aed63 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -110,22 +110,23 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
}
/**
- * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
+ * mext_folio_double_lock - Grab and lock folios on both @inode1 and @inode2
*
* @inode1: the inode structure
* @inode2: the inode structure
- * @index1: page index
- * @index2: page index
- * @page: result page vector
+ * @index1: folio index
+ * @index2: folio index
+ * @folio: result folio vector
*
- * Grab two locked pages for inode's by inode order
+ * Grab two locked folios for inodes by inode order
*/
static int
-mext_page_double_lock(struct inode *inode1, struct inode *inode2,
- pgoff_t index1, pgoff_t index2, struct page *page[2])
+mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
+ pgoff_t index1, pgoff_t index2, struct folio *folio[2])
{
struct address_space *mapping[2];
unsigned int flags;
+ unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
BUG_ON(!inode1 || !inode2);
if (inode1 < inode2) {
@@ -138,28 +139,30 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
}
flags = memalloc_nofs_save();
- page[0] = grab_cache_page_write_begin(mapping[0], index1);
- if (!page[0]) {
+ folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
+ mapping_gfp_mask(mapping[0]));
+ if (!folio[0]) {
memalloc_nofs_restore(flags);
return -ENOMEM;
}
- page[1] = grab_cache_page_write_begin(mapping[1], index2);
+ folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
+ mapping_gfp_mask(mapping[1]));
memalloc_nofs_restore(flags);
- if (!page[1]) {
- unlock_page(page[0]);
- put_page(page[0]);
+ if (!folio[1]) {
+ folio_unlock(folio[0]);
+ folio_put(folio[0]);
return -ENOMEM;
}
/*
- * grab_cache_page_write_begin() may not wait on page's writeback if
+ * __filemap_get_folio() may not wait on folio's writeback if
* BDI not demand that. But it is reasonable to be very conservative
- * here and explicitly wait on page's writeback
+ * here and explicitly wait on folio's writeback
*/
- wait_on_page_writeback(page[0]);
- wait_on_page_writeback(page[1]);
+ folio_wait_writeback(folio[0]);
+ folio_wait_writeback(folio[1]);
if (inode1 > inode2)
- swap(page[0], page[1]);
+ swap(folio[0], folio[1]);
return 0;
}
@@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
int block_len_in_page, int unwritten, int *err)
{
struct inode *orig_inode = file_inode(o_filp);
- struct page *pagep[2] = {NULL, NULL};
struct folio *folio[2] = {NULL, NULL};
handle_t *handle;
ext4_lblk_t orig_blk_offset, donor_blk_offset;
@@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
replaced_size = data_size;
- *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
- donor_page_offset, pagep);
+ *err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
+ donor_page_offset, folio);
if (unlikely(*err < 0))
goto stop_journal;
/*
@@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
* hold page's lock, if it is still the case data copy is not
* necessary, just swap data blocks between orig and donor.
*/
- folio[0] = page_folio(pagep[0]);
- folio[1] = page_folio(pagep[1]);
VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
--
2.38.1
On Tue, Dec 06, 2022 at 12:41:15PM -0800, Vishal Moola (Oracle) wrote:
> Converts mext_page_double_lock() to use folios. This change saves
> 146 bytes of kernel text and removes 3 calls to compound_head().
I think it actually removes more than three ...
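For reference, the page-based helpers this patch drops are all thin folio
wrappers under the hood; roughly, from the 6.1-era tree (a sketch, not part
of this patch):

	/* mm/folio-compat.c -- page_folio() is compound_head() in disguise */
	void unlock_page(struct page *page)
	{
		return folio_unlock(page_folio(page));
	}

	void wait_on_page_writeback(struct page *page)
	{
		return folio_wait_writeback(page_folio(page));
	}

so every call site that moves to the folio API loses one of those hidden
compound_head() calls.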
> flags = memalloc_nofs_save();
> - page[0] = grab_cache_page_write_begin(mapping[0], index1);
> - if (!page[0]) {
> + folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
> + mapping_gfp_mask(mapping[0]));
one
> + if (!folio[0]) {
> memalloc_nofs_restore(flags);
> return -ENOMEM;
> }
>
> - page[1] = grab_cache_page_write_begin(mapping[1], index2);
> + folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
> + mapping_gfp_mask(mapping[1]));
two
> memalloc_nofs_restore(flags);
> - if (!page[1]) {
> - unlock_page(page[0]);
> - put_page(page[0]);
> + if (!folio[1]) {
> + folio_unlock(folio[0]);
> + folio_put(folio[0]);
four
> return -ENOMEM;
> }
> /*
> - * grab_cache_page_write_begin() may not wait on page's writeback if
> + * __filemap_get_folio() may not wait on folio's writeback if
> * BDI not demand that. But it is reasonable to be very conservative
> - * here and explicitly wait on page's writeback
> + * here and explicitly wait on folio's writeback
> */
> - wait_on_page_writeback(page[0]);
> - wait_on_page_writeback(page[1]);
> + folio_wait_writeback(folio[0]);
> + folio_wait_writeback(folio[1]);
six
> if (inode1 > inode2)
> - swap(page[0], page[1]);
> + swap(folio[0], folio[1]);
>
> return 0;
> }
> @@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
> int block_len_in_page, int unwritten, int *err)
> {
> struct inode *orig_inode = file_inode(o_filp);
> - struct page *pagep[2] = {NULL, NULL};
> struct folio *folio[2] = {NULL, NULL};
> handle_t *handle;
> ext4_lblk_t orig_blk_offset, donor_blk_offset;
> @@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
>
> replaced_size = data_size;
>
> - *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
> - donor_page_offset, pagep);
> + *err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
> + donor_page_offset, folio);
> if (unlikely(*err < 0))
> goto stop_journal;
> /*
> @@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
> * hold page's lock, if it is still the case data copy is not
> * necessary, just swap data blocks between orig and donor.
> */
> - folio[0] = page_folio(pagep[0]);
> - folio[1] = page_folio(pagep[1]);
eight.
Three are inline, which accounts for the 146 bytes of saved text, but we're
also removing out-of-line calls in addition to the inline ones.
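(The inline ones are presumably put_page() and the two open-coded
page_folio() conversions; a simplified sketch of put_page() from
include/linux/mm.h, with the devmap handling elided:

	static inline void put_page(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* compound_head(), inlined */

		folio_put(folio);
	}

whereas unlock_page() and wait_on_page_writeback() live out of line in
mm/folio-compat.c.)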
Anyway, whether the description is updated or not, this looks good to me.
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
On Tue, Dec 06, 2022 at 08:50:02PM +0000, Matthew Wilcox wrote:
> > flags = memalloc_nofs_save();
> > - page[0] = grab_cache_page_write_begin(mapping[0], index1);
> > - if (!page[0]) {
> > + folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
> > + mapping_gfp_mask(mapping[0]));
>
> one
>
> > + if (!folio[0]) {
> > memalloc_nofs_restore(flags);
> > return -ENOMEM;
> > }
> >
> > - page[1] = grab_cache_page_write_begin(mapping[1], index2);
> > + folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
> > + mapping_gfp_mask(mapping[1]));
>
> two
*facepalm*. Those don't contain calls to compound_head(); they contain
calls to folio_file_page(), which is still a moderately expensive and
unnecessary conversion, just in the other direction.
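For anyone following along, this is roughly what the old call chain looked
like in the 6.1-era tree (shadow-entry handling trimmed from this sketch):

	/* mm/folio-compat.c */
	struct page *pagecache_get_page(struct address_space *mapping,
			pgoff_t index, int fgp_flags, gfp_t gfp)
	{
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
		if (!folio)
			return NULL;
		return folio_file_page(folio, index);	/* folio -> page conversion */
	}

	struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index)
	{
		unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

		return pagecache_get_page(mapping, index, fgp_flags,
				mapping_gfp_mask(mapping));
	}

so mext_folio_double_lock() calling __filemap_get_folio() directly with the
same fgp_flags skips the folio_file_page() step entirely.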
> Three are inline, which makes sense for the 146 bytes, but we're also
> removing out of line calls as well as the inline calls.
>
> Anyway, whether the description is updated or not, this looks good to me.
I seem to have miscounted. We can correct that part in the description
to "removes 6 calls to compound_head() and 2 calls to folio_file_page()."
Thanks for the review!
> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>