From: Zhang Yi <[email protected]>
Factor the buffer_head related handling out of ext4_da_map_blocks() and
into ext4_da_get_block_prep(), and make ext4_da_map_blocks() always
return 0 on success; callers can now distinguish a delalloc mapping
through the EXT4_MAP_DELAYED flag.
Signed-off-by: Zhang Yi <[email protected]>
---
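Note for reviewers (illustration only, not part of the change): after this
patch a get_block style caller is expected to consume the result of
ext4_da_map_blocks() roughly as sketched below, keying the delalloc case
off EXT4_MAP_DELAYED in the map rather than off buffer_head state set
inside the mapping helper. The sketch simply mirrors the hunk in
ext4_da_get_block_prep() below and assumes its locals (inode, iblock, bh,
invalid_block) are in scope; it is not meant to build standalone.

	struct ext4_map_blocks map = {
		.m_lblk = iblock,
		.m_len  = 1,
	};
	int ret;

	/* 0 on success, negative errno on failure */
	ret = ext4_da_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	if (map.m_flags & EXT4_MAP_DELAYED) {
		/* delalloc: buffer_head bookkeeping now lives in the caller */
		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
	}

Keeping the invalid_block plumbing in the one remaining buffer_head
consumer avoids passing a bh into the mapping path at all.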
fs/ext4/inode.c | 75 ++++++++++++++++++++++++-------------------------
1 file changed, 36 insertions(+), 39 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9ac9bb548a4c..9f9b1fce8da8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1690,48 +1690,37 @@ static int ext4_insert_delayed_blocks(struct inode *inode, ext4_lblk_t lblk,
* time. This function looks up the requested blocks and sets the
* buffer delay bit under the protection of i_data_sem.
*/
-static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
- struct ext4_map_blocks *map,
- struct buffer_head *bh)
+static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
{
struct extent_status es;
int retval;
- sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
- if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
- invalid_block = ~0;
-
map->m_flags = 0;
ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
- if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
- retval = es.es_len - (iblock - es.es_lblk);
- if (retval > map->m_len)
- retval = map->m_len;
- map->m_len = retval;
-
- if (ext4_es_is_hole(&es))
- goto add_delayed;
+ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ retval = es.es_len - (map->m_lblk - es.es_lblk);
+ map->m_len = min_t(unsigned int, map->m_len, retval);
/*
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
+ if (ext4_es_is_delonly(&es)) {
+ map->m_flags |= EXT4_MAP_DELAYED;
return 0;
}
+ if (ext4_es_is_hole(&es))
+ goto add_delayed;
- map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
+ map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
if (ext4_es_is_written(&es))
map->m_flags |= EXT4_MAP_MAPPED;
else if (ext4_es_is_unwritten(&es))
@@ -1743,7 +1732,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
if (ext4_es_is_delayed(&es) || ext4_es_is_written(&es))
- return retval;
+ return 0;
down_read(&EXT4_I(inode)->i_data_sem);
goto insert_extent;
@@ -1775,6 +1764,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
WARN_ON(1);
}
insert_extent:
+ retval = 0;
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (status == EXTENT_STATUS_UNWRITTEN)
@@ -1788,14 +1778,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
add_delayed:
down_write(&EXT4_I(inode)->i_data_sem);
+ map->m_flags |= EXT4_MAP_DELAYED;
retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
up_write(&EXT4_I(inode)->i_data_sem);
- if (retval)
- return retval;
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
return retval;
}
@@ -1815,11 +1801,15 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
+ sector_t invalid_block = ~((sector_t) 0xffff);
int ret = 0;
BUG_ON(create == 0);
BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
+ if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
+ invalid_block = ~0;
+
map.m_lblk = iblock;
map.m_len = 1;
@@ -1828,22 +1818,29 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* preallocated blocks are unmapped but should treated
* the same as allocated blocks.
*/
- ret = ext4_da_map_blocks(inode, iblock, &map, bh);
- if (ret <= 0)
+ ret = ext4_da_map_blocks(inode, &map);
+ if (ret < 0)
return ret;
- map_bh(bh, inode->i_sb, map.m_pblk);
- ext4_update_bh_state(bh, map.m_flags);
-
- if (buffer_unwritten(bh)) {
- /* A delayed write to unwritten bh should be marked
- * new and mapped. Mapped ensures that we don't do
- * get_block multiple times when we write to the same
- * offset and new ensures that we do proper zero out
- * for partial write.
- */
+ if (map.m_flags & EXT4_MAP_DELAYED) {
+ map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
- set_buffer_mapped(bh);
+ set_buffer_delay(bh);
+ } else {
+ map_bh(bh, inode->i_sb, map.m_pblk);
+ ext4_update_bh_state(bh, map.m_flags);
+
+ if (buffer_unwritten(bh)) {
+ /*
+ * A delayed write to unwritten bh should be marked
+ * new and mapped. Mapped ensures that we don't do
+ * get_block multiple times when we write to the same
+ * offset and new ensures that we do proper zero out
+ * for partial write.
+ */
+ set_buffer_new(bh);
+ set_buffer_mapped(bh);
+ }
}
return 0;
}
--
2.39.2