Add large folio support to the shmem write and fallocate paths, using the
same high-order preference mechanism as the iomap buffered IO path in
__filemap_get_folio().

Add shmem_mapping_size_order() to get a hint for the folio order based on
the file size, taking the mapping's requirements into account (large folio
support and index alignment). For example, with 4KiB pages an aligned 2MiB
write yields ilog2(2MiB) - PAGE_SHIFT = 21 - 12 = 9, which is then clamped
to MAX_PAGECACHE_ORDER.

Swap does not support high-order folios for now, so fall back to order-0
when swap is enabled (i.e. when the mount is not noswap).

Skip the high-order folio fallback loop when the reclaim path returns with
no space left (ENOSPC).

Add the __GFP_COMP flag to the high-order folio allocation path so the
pages are allocated as a compound page, fixing a memory leak.
Signed-off-by: Daniel Gomez <[email protected]>
---
mm/shmem.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 47 insertions(+), 2 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index fcd2c9befe19..9308a334a940 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1836,23 +1836,63 @@ static struct folio *shmem_alloc_folio(gfp_t gfp, struct shmem_inode_info *info,
struct page *page;
mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
- page = alloc_pages_mpol(gfp, order, mpol, ilx, numa_node_id());
+ page = alloc_pages_mpol(gfp | __GFP_COMP, order, mpol, ilx,
+ numa_node_id());
mpol_cond_put(mpol);
return page_rmappable_folio(page);
}
+/**
+ * shmem_mapping_size_order - Get maximum folio order for the given file size.
+ * @mapping: Target address_space.
+ * @index: The page index.
+ * @size: The suggested size of the folio to create.
+ *
+ * This returns a high order for folios (when supported) based on the file size
+ * which the mapping currently allows at the given index. The index is relevant
+ * due to alignment considerations the mapping might have. The returned order
+ * may be less than the size passed.
+ *
+ * Like __filemap_get_folio order calculation.
+ *
+ * Return: The order.
+ */
+static inline unsigned int
+shmem_mapping_size_order(struct address_space *mapping, pgoff_t index,
+ size_t size, struct shmem_sb_info *sbinfo)
+{
+ unsigned int order = ilog2(size);
+
+ if ((order <= PAGE_SHIFT) ||
+ (!mapping_large_folio_support(mapping) || !sbinfo->noswap))
+ return 0;
+
+ order -= PAGE_SHIFT;
+
+ /* If we're not aligned, allocate a smaller folio */
+ if (index & ((1UL << order) - 1))
+ order = __ffs(index);
+
+ order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
+
+ /* Order-1 not supported due to THP dependency */
+ return (order == 1) ? 0 : order;
+}
+
static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
struct inode *inode, pgoff_t index,
struct mm_struct *fault_mm, bool huge, size_t len)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned int order = 0;
+ unsigned int order = shmem_mapping_size_order(mapping, index, len,
+ SHMEM_SB(inode->i_sb));
struct folio *folio;
long pages;
int error;
+neworder:
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
@@ -1937,6 +1977,11 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
unlock:
folio_unlock(folio);
folio_put(folio);
+ if ((error != -ENOSPC) && (order > 0)) {
+ if (--order == 1)
+ order = 0;
+ goto neworder;
+ }
return ERR_PTR(error);
}
--
2.43.0
Hi Daniel,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on xfs-linux/for-next brauner-vfs/vfs.all linus/master v6.9 next-20240515]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Daniel-Gomez/splice-don-t-check-for-uptodate-if-partially-uptodate-is-impl/20240515-135925
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240515055719.32577-13-da.gomez%40samsung.com
patch subject: [PATCH 12/12] shmem: add large folio support to the write and fallocate paths
config: openrisc-defconfig (https://download.01.org/0day-ci/archive/20240516/[email protected]/config)
compiler: or1k-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240516/[email protected]/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
All warnings (new ones prefixed by >>):
>> mm/shmem.c:1864: warning: Function parameter or struct member 'sbinfo' not described in 'shmem_mapping_size_order'
mm/shmem.c:2427: warning: Function parameter or struct member 'len' not described in 'shmem_get_folio'
vim +1864 mm/shmem.c
1845
1846 /**
1847 * shmem_mapping_size_order - Get maximum folio order for the given file size.
1848 * @mapping: Target address_space.
1849 * @index: The page index.
1850 * @size: The suggested size of the folio to create.
1851 *
1852 * This returns a high order for folios (when supported) based on the file size
1853 * which the mapping currently allows at the given index. The index is relevant
1854 * due to alignment considerations the mapping might have. The returned order
1855 * may be less than the size passed.
1856 *
1857 * Like __filemap_get_folio order calculation.
1858 *
1859 * Return: The order.
1860 */
1861 static inline unsigned int
1862 shmem_mapping_size_order(struct address_space *mapping, pgoff_t index,
1863 size_t size, struct shmem_sb_info *sbinfo)
> 1864 {
1865 unsigned int order = ilog2(size);
1866
1867 if ((order <= PAGE_SHIFT) ||
1868 (!mapping_large_folio_support(mapping) || !sbinfo->noswap))
1869 return 0;
1870
1871 order -= PAGE_SHIFT;
1872
1873 /* If we're not aligned, allocate a smaller folio */
1874 if (index & ((1UL << order) - 1))
1875 order = __ffs(index);
1876
1877 order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
1878
1879 /* Order-1 not supported due to THP dependency */
1880 return (order == 1) ? 0 : order;
1881 }
1882
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
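
One way to address the first kernel-doc warning above would be to describe the
new @sbinfo parameter in the comment block for shmem_mapping_size_order(). The
wording of the @sbinfo line below is only a sketch, not taken from the patch:

/**
 * shmem_mapping_size_order - Get maximum folio order for the given file size.
 * @mapping: Target address_space.
 * @index: The page index.
 * @size: The suggested size of the folio to create.
 * @sbinfo: The shmem superblock info, used to check whether the mount is
 *          noswap (high orders are only returned for noswap mounts).
 *
 * (rest of the description unchanged)
 *
 * Return: The order.
 */

The second warning points at shmem_get_folio(), whose kernel-doc would likewise
need an @len line for the new length parameter (that hunk is not part of the
quote above).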