Remove the only spots in affs which actually use a struct page; there
are a few places where one is mentioned, but it's part of the interface.
The rest of this is removing the remaining calls to set_bh_page(),
and then removing the function before any new users show up.
Matthew Wilcox (Oracle) (7):
highmem: Add memcpy_to_folio() and memcpy_from_folio()
affs: Convert affs_symlink_read_folio() to use the folio
affs: Convert data read and write to use folios
migrate: Use folio_set_bh() instead of set_bh_page()
ntfs3: Convert ntfs_get_block_vbo() to use a folio
jbd2: Use a folio in jbd2_journal_write_metadata_buffer()
buffer: Remove set_bh_page()
fs/affs/file.c | 77 ++++++++++++++++++-------------------
fs/affs/symlink.c | 12 +++---
fs/buffer.c | 15 --------
fs/jbd2/journal.c | 35 ++++++++---------
fs/ntfs3/inode.c | 10 ++---
include/linux/buffer_head.h | 2 -
include/linux/highmem.h | 44 +++++++++++++++++++++
mm/migrate.c | 2 +-
8 files changed, 109 insertions(+), 88 deletions(-)
--
2.39.2
These are the folio equivalent of memcpy_to_page() and memcpy_from_page().
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
include/linux/highmem.h | 44 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 68da30625a6c..0280f57d4744 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -439,6 +439,50 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ (chunk > (PAGE_SIZE - offset_in_page(offset))))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
+
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ (chunk > (PAGE_SIZE - offset_in_page(offset))))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
+}
+
/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
--
2.39.2
We still need to convert to/from folios in write_begin & write_end
to fit the API, but this removes a lot of calls to old page-based
functions, removing many hidden calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
fs/affs/file.c | 77 +++++++++++++++++++++++++-------------------------
1 file changed, 38 insertions(+), 39 deletions(-)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index e43f2f007ac1..705e227ff63d 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -520,21 +520,20 @@ affs_getemptyblk_ino(struct inode *inode, int block)
return ERR_PTR(err);
}
-static int
-affs_do_readpage_ofs(struct page *page, unsigned to, int create)
+static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct buffer_head *bh;
- unsigned pos = 0;
- u32 bidx, boff, bsize;
+ size_t pos = 0;
+ size_t bidx, boff, bsize;
u32 tmp;
- pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
- page->index, to);
- BUG_ON(to > PAGE_SIZE);
+ pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
+ folio->index, to);
+ BUG_ON(to > folio_size(folio));
bsize = AFFS_SB(sb)->s_data_blksize;
- tmp = page->index << PAGE_SHIFT;
+ tmp = folio_pos(folio);
bidx = tmp / bsize;
boff = tmp % bsize;
@@ -544,7 +543,7 @@ affs_do_readpage_ofs(struct page *page, unsigned to, int create)
return PTR_ERR(bh);
tmp = min(bsize - boff, to - pos);
BUG_ON(pos + tmp > to || tmp > bsize);
- memcpy_to_page(page, pos, AFFS_DATA(bh) + boff, tmp);
+ memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
affs_brelse(bh);
bidx++;
pos += tmp;
@@ -624,25 +623,23 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
return PTR_ERR(bh);
}
-static int
-affs_read_folio_ofs(struct file *file, struct folio *folio)
+static int affs_read_folio_ofs(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
- u32 to;
+ struct inode *inode = folio->mapping->host;
+ size_t to;
int err;
- pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
- to = PAGE_SIZE;
- if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
- to = inode->i_size & ~PAGE_MASK;
- memset(page_address(page) + to, 0, PAGE_SIZE - to);
+ pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
+ to = folio_size(folio);
+ if (folio_pos(folio) + to > inode->i_size) {
+ to = inode->i_size - folio_pos(folio);
+ folio_zero_segment(folio, to, folio_size(folio));
}
- err = affs_do_readpage_ofs(page, to, 0);
+ err = affs_do_read_folio_ofs(folio, to, 0);
if (!err)
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return err;
}
@@ -651,7 +648,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
- struct page *page;
+ struct folio *folio;
pgoff_t index;
int err = 0;
@@ -667,19 +664,20 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
}
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ *pagep = &folio->page;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
/* XXX: inefficient but safe in the face of short writes */
- err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
+ err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
if (err) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
return err;
}
@@ -688,6 +686,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
+ struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb;
struct buffer_head *bh, *prev_bh;
@@ -701,18 +700,18 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
to = from + len;
/*
* XXX: not sure if this can handle short copies (len < copied), but
- * we don't have to, because the page should always be uptodate here,
+ * we don't have to, because the folio should always be uptodate here,
* due to write_begin.
*/
pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
pos + len);
bsize = AFFS_SB(sb)->s_data_blksize;
- data = page_address(page);
+ data = folio_address(folio);
bh = NULL;
written = 0;
- tmp = (page->index << PAGE_SHIFT) + from;
+ tmp = (folio->index << PAGE_SHIFT) + from;
bidx = tmp / bsize;
boff = tmp % bsize;
if (boff) {
@@ -804,11 +803,11 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
from += tmp;
bidx++;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
done:
affs_brelse(bh);
- tmp = (page->index << PAGE_SHIFT) + from;
+ tmp = (folio->index << PAGE_SHIFT) + from;
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
@@ -819,8 +818,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
}
err_first_bh:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return written;
--
2.39.2
This function was converted before folio_set_bh() existed. Catch
up to the new API.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
mm/migrate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index af8557d78549..1363053894ce 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -773,7 +773,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
bh = head;
do {
- set_bh_page(bh, &dst->page, bh_offset(bh));
+ folio_set_bh(bh, dst, bh_offset(bh));
bh = bh->b_this_page;
} while (bh != head);
--
2.39.2
Remove use of the old page APIs. That includes use of setting PageError
on error; simply not setting the uptodate flag is sufficient.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
fs/affs/symlink.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index 31d6446dc166..094aec8d17b8 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -13,10 +13,9 @@
static int affs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
struct buffer_head *bh;
- struct inode *inode = page->mapping->host;
- char *link = page_address(page);
+ struct inode *inode = folio->mapping->host;
+ char *link = folio_address(folio);
struct slink_front *lf;
int i, j;
char c;
@@ -58,12 +57,11 @@ static int affs_symlink_read_folio(struct file *file, struct folio *folio)
}
link[i] = '\0';
affs_brelse(bh);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
fail:
- SetPageError(page);
- unlock_page(page);
+ folio_unlock(folio);
return -EIO;
}
--
2.39.2
Remove a user of set_bh_page().
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
fs/ntfs3/inode.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index dc7e7ab701c6..8ae572aacc69 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -554,7 +554,7 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct ntfs_inode *ni = ntfs_i(inode);
- struct page *page = bh->b_page;
+ struct folio *folio = bh->b_folio;
u8 cluster_bits = sbi->cluster_bits;
u32 block_size = sb->s_blocksize;
u64 bytes, lbo, valid;
@@ -569,7 +569,7 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
if (is_resident(ni)) {
ni_lock(ni);
- err = attr_data_read_resident(ni, page);
+ err = attr_data_read_resident(ni, &folio->page);
ni_unlock(ni);
if (!err)
@@ -642,17 +642,17 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
*/
bytes = block_size;
- if (page) {
+ if (folio) {
u32 voff = valid - vbo;
bh->b_size = block_size;
off = vbo & (PAGE_SIZE - 1);
- set_bh_page(bh, page, off);
+ folio_set_bh(bh, folio, off);
err = bh_read(bh, 0);
if (err < 0)
goto out;
- zero_user_segment(page, off + voff, off + block_size);
+ folio_zero_segment(folio, off + voff, off + block_size);
}
}
--
2.39.2
The primary goal here is removing the use of set_bh_page().
Take the opportunity to switch from kmap_atomic() to kmap_local().
This simplifies the function as the offset is already added to
the pointer.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
fs/jbd2/journal.c | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index fbce16fedaa4..1b5a45ab62b0 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -341,7 +341,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
int do_escape = 0;
char *mapped_data;
struct buffer_head *new_bh;
- struct page *new_page;
+ struct folio *new_folio;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
journal_t *journal = transaction->t_journal;
@@ -370,14 +370,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
*/
if (jh_in->b_frozen_data) {
done_copy_out = 1;
- new_page = virt_to_page(jh_in->b_frozen_data);
- new_offset = offset_in_page(jh_in->b_frozen_data);
+ new_folio = virt_to_folio(jh_in->b_frozen_data);
+ new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
} else {
- new_page = jh2bh(jh_in)->b_page;
- new_offset = offset_in_page(jh2bh(jh_in)->b_data);
+ new_folio = jh2bh(jh_in)->b_folio;
+ new_offset = offset_in_folio(new_folio, jh2bh(jh_in)->b_data);
}
- mapped_data = kmap_atomic(new_page);
+ mapped_data = kmap_local_folio(new_folio, new_offset);
/*
* Fire data frozen trigger if data already wasn't frozen. Do this
* before checking for escaping, as the trigger may modify the magic
@@ -385,18 +385,17 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
* data in the buffer.
*/
if (!done_copy_out)
- jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+ jbd2_buffer_frozen_trigger(jh_in, mapped_data,
jh_in->b_triggers);
/*
* Check for escaping
*/
- if (*((__be32 *)(mapped_data + new_offset)) ==
- cpu_to_be32(JBD2_MAGIC_NUMBER)) {
+ if (*((__be32 *)mapped_data) == cpu_to_be32(JBD2_MAGIC_NUMBER)) {
need_copy_out = 1;
do_escape = 1;
}
- kunmap_atomic(mapped_data);
+ kunmap_local(mapped_data);
/*
* Do we need to do a data copy?
@@ -417,12 +416,10 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
}
jh_in->b_frozen_data = tmp;
- mapped_data = kmap_atomic(new_page);
- memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
- kunmap_atomic(mapped_data);
+ memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size);
- new_page = virt_to_page(tmp);
- new_offset = offset_in_page(tmp);
+ new_folio = virt_to_folio(tmp);
+ new_offset = offset_in_folio(new_folio, tmp);
done_copy_out = 1;
/*
@@ -438,12 +435,12 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
* copying, we can finally do so.
*/
if (do_escape) {
- mapped_data = kmap_atomic(new_page);
- *((unsigned int *)(mapped_data + new_offset)) = 0;
- kunmap_atomic(mapped_data);
+ mapped_data = kmap_local_folio(new_folio, new_offset);
+ *((unsigned int *)mapped_data) = 0;
+ kunmap_local(mapped_data);
}
- set_bh_page(new_bh, new_page, new_offset);
+ folio_set_bh(new_bh, new_folio, new_offset);
new_bh->b_size = bh_in->b_size;
new_bh->b_bdev = journal->j_dev;
new_bh->b_blocknr = blocknr;
--
2.39.2
On 2023-07-13 05:55, Matthew Wilcox (Oracle) wrote:
> We still need to convert to/from folios in write_begin & write_end
> to fit the API, but this removes a lot of calls to old page-based
> functions, removing many hidden calls to compound_head().
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Looks good to me except one minor nit!
Reviewed-by: Pankaj Raghav <[email protected]>
> @@ -624,25 +623,23 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
> return PTR_ERR(bh);
> }
>
> -static int
> -affs_read_folio_ofs(struct file *file, struct folio *folio)
> +static int affs_read_folio_ofs(struct file *file, struct folio *folio)
> {
> - struct page *page = &folio->page;
> - struct inode *inode = page->mapping->host;
> - u32 to;
> + struct inode *inode = folio->mapping->host;
> + size_t to;
> int err;
>
> - pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
> - to = PAGE_SIZE;
> - if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
> - to = inode->i_size & ~PAGE_MASK;
> - memset(page_address(page) + to, 0, PAGE_SIZE - to);
> + pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
> + to = folio_size(folio);
> + if (folio_pos(folio) + to > inode->i_size) {
> + to = inode->i_size - folio_pos(folio);
> + folio_zero_segment(folio, to, folio_size(folio));
This change makes the code more readable!
> }
>
> - err = affs_do_readpage_ofs(page, to, 0);
> + err = affs_do_read_folio_ofs(folio, to, 0);
> if (!err)
> @@ -688,6 +686,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
> loff_t pos, unsigned len, unsigned copied,
> struct page *page, void *fsdata)
> {
> + struct folio *folio = page_folio(page);
> struct inode *inode = mapping->host;
> struct super_block *sb = inode->i_sb;
> struct buffer_head *bh, *prev_bh;
> @@ -701,18 +700,18 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
> to = from + len;
> /*
> * XXX: not sure if this can handle short copies (len < copied), but
> - * we don't have to, because the page should always be uptodate here,
> + * we don't have to, because the folio should always be uptodate here,
> * due to write_begin.
> */
>
> pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
> pos + len);
> bsize = AFFS_SB(sb)->s_data_blksize;
> - data = page_address(page);
> + data = folio_address(folio);
>
> bh = NULL;
> written = 0;
> - tmp = (page->index << PAGE_SHIFT) + from;
> + tmp = (folio->index << PAGE_SHIFT) + from;
Can this be made to:
tmp = folio_pos(folio) + from;
Similar to what is being done in affs_do_read_folio_ofs()
> bidx = tmp / bsize;
> boff = tmp % bsize;
> if (boff) {
> @@ -804,11 +803,11 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
> from += tmp;
> bidx++;
> }
> - SetPageUptodate(page);
> + folio_mark_uptodate(folio);
>
> done:
> affs_brelse(bh);
> - tmp = (page->index << PAGE_SHIFT) + from;
> + tmp = (folio->index << PAGE_SHIFT) + from;
Same as the previous comment.
> if (tmp > inode->i_size)
> inode->i_size = AFFS_I(inode)->mmu_private = tmp;
>
> @@ -819,8 +818,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
> }
>
> err_first_bh:
> - unlock_page(page);
> - put_page(page);
> + folio_unlock(folio);
> + folio_put(folio);
>
> return written;
>
On Thu, Jul 13, 2023 at 04:55:08AM +0100, Matthew Wilcox (Oracle) wrote:
> We still need to convert to/from folios in write_begin & write_end
> to fit the API, but this removes a lot of calls to old page-based
> functions, removing many hidden calls to compound_head().
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: David Sterba <[email protected]>
On Thu, Jul 13, 2023 at 04:55:07AM +0100, Matthew Wilcox (Oracle) wrote:
> Remove use of the old page APIs. That includes use of setting PageError
> on error; simply not setting the uptodate flag is sufficient.
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: David Sterba <[email protected]>
Am Do., 13. Juli 2023 um 06:04 Uhr schrieb Matthew Wilcox (Oracle)
<[email protected]>:
> These are the folio equivalent of memcpy_to_page() and memcpy_from_page().
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
> ---
> include/linux/highmem.h | 44 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 44 insertions(+)
>
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 68da30625a6c..0280f57d4744 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -439,6 +439,50 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
> kunmap_local(addr);
> }
>
> +static inline void memcpy_from_folio(char *to, struct folio *folio,
> + size_t offset, size_t len)
> +{
> + VM_BUG_ON(offset + len > folio_size(folio));
> +
> + do {
> + char *from = kmap_local_folio(folio, offset);
> + size_t chunk = len;
> +
> + if (folio_test_highmem(folio) &&
> + (chunk > (PAGE_SIZE - offset_in_page(offset))))
> + chunk = PAGE_SIZE - offset_in_page(offset);
> + memcpy(to, from, len);
We want memcpy(to, from, chunk) here.
> + kunmap_local(from);
> +
> + from += chunk;
> + offset += chunk;
> + len -= chunk;
> + } while (len > 0);
> +}
> +
> +static inline void memcpy_to_folio(struct folio *folio, size_t offset,
> + const char *from, size_t len)
> +{
> + VM_BUG_ON(offset + len > folio_size(folio));
> +
> + do {
> + char *to = kmap_local_folio(folio, offset);
> + size_t chunk = len;
> +
> + if (folio_test_highmem(folio) &&
> + (chunk > (PAGE_SIZE - offset_in_page(offset))))
> + chunk = PAGE_SIZE - offset_in_page(offset);
> + memcpy(to, from, len);
And also here.
> + kunmap_local(to);
> +
> + from += chunk;
> + offset += chunk;
> + len -= chunk;
> + } while (len > 0);
> +
> + flush_dcache_folio(folio);
> +}
> +
> /**
> * memcpy_from_file_folio - Copy some bytes from a file folio.
> * @to: The destination buffer.
> --
> 2.39.2
>
Thanks,
Andreas
On Thu 13-07-23 04:55:11, Matthew Wilcox (Oracle) wrote:
> The primary goal here is removing the use of set_bh_page().
> Take the opportunity to switch from kmap_atomic() to kmap_local().
> This simplifies the function as the offset is already added to
> the pointer.
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Looks good. Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> fs/jbd2/journal.c | 35 ++++++++++++++++-------------------
> 1 file changed, 16 insertions(+), 19 deletions(-)
>
> diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
> index fbce16fedaa4..1b5a45ab62b0 100644
> --- a/fs/jbd2/journal.c
> +++ b/fs/jbd2/journal.c
> @@ -341,7 +341,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
> int do_escape = 0;
> char *mapped_data;
> struct buffer_head *new_bh;
> - struct page *new_page;
> + struct folio *new_folio;
> unsigned int new_offset;
> struct buffer_head *bh_in = jh2bh(jh_in);
> journal_t *journal = transaction->t_journal;
> @@ -370,14 +370,14 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
> */
> if (jh_in->b_frozen_data) {
> done_copy_out = 1;
> - new_page = virt_to_page(jh_in->b_frozen_data);
> - new_offset = offset_in_page(jh_in->b_frozen_data);
> + new_folio = virt_to_folio(jh_in->b_frozen_data);
> + new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
> } else {
> - new_page = jh2bh(jh_in)->b_page;
> - new_offset = offset_in_page(jh2bh(jh_in)->b_data);
> + new_folio = jh2bh(jh_in)->b_folio;
> + new_offset = offset_in_folio(new_folio, jh2bh(jh_in)->b_data);
> }
>
> - mapped_data = kmap_atomic(new_page);
> + mapped_data = kmap_local_folio(new_folio, new_offset);
> /*
> * Fire data frozen trigger if data already wasn't frozen. Do this
> * before checking for escaping, as the trigger may modify the magic
> @@ -385,18 +385,17 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
> * data in the buffer.
> */
> if (!done_copy_out)
> - jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
> + jbd2_buffer_frozen_trigger(jh_in, mapped_data,
> jh_in->b_triggers);
>
> /*
> * Check for escaping
> */
> - if (*((__be32 *)(mapped_data + new_offset)) ==
> - cpu_to_be32(JBD2_MAGIC_NUMBER)) {
> + if (*((__be32 *)mapped_data) == cpu_to_be32(JBD2_MAGIC_NUMBER)) {
> need_copy_out = 1;
> do_escape = 1;
> }
> - kunmap_atomic(mapped_data);
> + kunmap_local(mapped_data);
>
> /*
> * Do we need to do a data copy?
> @@ -417,12 +416,10 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
> }
>
> jh_in->b_frozen_data = tmp;
> - mapped_data = kmap_atomic(new_page);
> - memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
> - kunmap_atomic(mapped_data);
> + memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size);
>
> - new_page = virt_to_page(tmp);
> - new_offset = offset_in_page(tmp);
> + new_folio = virt_to_folio(tmp);
> + new_offset = offset_in_folio(new_folio, tmp);
> done_copy_out = 1;
>
> /*
> @@ -438,12 +435,12 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
> * copying, we can finally do so.
> */
> if (do_escape) {
> - mapped_data = kmap_atomic(new_page);
> - *((unsigned int *)(mapped_data + new_offset)) = 0;
> - kunmap_atomic(mapped_data);
> + mapped_data = kmap_local_folio(new_folio, new_offset);
> + *((unsigned int *)mapped_data) = 0;
> + kunmap_local(mapped_data);
> }
>
> - set_bh_page(new_bh, new_page, new_offset);
> + folio_set_bh(new_bh, new_folio, new_offset);
> new_bh->b_size = bh_in->b_size;
> new_bh->b_bdev = journal->j_dev;
> new_bh->b_blocknr = blocknr;
> --
> 2.39.2
>
--
Jan Kara <[email protected]>
SUSE Labs, CR
On Thu 13-07-23 04:55:09, Matthew Wilcox (Oracle) wrote:
> This function was converted before folio_set_bh() existed. Catch
> up to the new API.
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Looks good. Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> mm/migrate.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index af8557d78549..1363053894ce 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -773,7 +773,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
>
> bh = head;
> do {
> - set_bh_page(bh, &dst->page, bh_offset(bh));
> + folio_set_bh(bh, dst, bh_offset(bh));
> bh = bh->b_this_page;
> } while (bh != head);
>
> --
> 2.39.2
>
--
Jan Kara <[email protected]>
SUSE Labs, CR