From: Luis Chamberlain <[email protected]>
Align indices to mapping_min_order in invalidate_inode_pages2_range(),
mapping_try_invalidate(), and truncate_inode_pages_range(). This is
necessary to keep the folios added to the page cache aligned with
mapping_min_order: batched lookups in these helpers must only return
whole min-order folios, so their start and end indices have to fall on
min-order folio boundaries.
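
For example, with purely illustrative numbers (a hypothetical min folio
order of 2 on a 4K PAGE_SIZE, i.e. nrpages = 4), truncating bytes
lstart = 5000 through lend = 50000 now computes:

	start = (5000 + 4 * 4096 - 1) >> PAGE_SHIFT;	/* = 5  */
	start = round_down(start, 4);			/* = 4  */
	end = round_down((50000 + 1) >> PAGE_SHIFT, 4);	/* = 12 */

The batched loop then only walks whole min-order folios in the page
range [4, 12), while the partially covered folios at pages 0-3 and
12-15 are handled by truncate_inode_partial_folio().
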
Signed-off-by: Luis Chamberlain <[email protected]>
Signed-off-by: Pankaj Raghav <[email protected]>
---
mm/truncate.c | 34 ++++++++++++++++++++++++----------
1 file changed, 24 insertions(+), 10 deletions(-)
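
A note on the invalidate paths, again with hypothetical numbers (not
taken from the patch itself): the window is shrunk inward to whole
min-order folios, so with nrpages = 4 an inclusive page range
start = 3, end = 9 is scanned as:

	index = round_up(3, 4);			/* = 4 */
	end_idx = round_down(9 + 1, 4) - 1;	/* = 7 */

Only the folio spanning pages 4-7 is considered; folios straddling
either edge of the requested range are left untouched.
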
diff --git a/mm/truncate.c b/mm/truncate.c
index 8e3aa9e8618e..d5ce8e30df70 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -337,6 +337,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
int i;
struct folio *folio;
bool same_folio;
+	unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int nrpages = 1U << min_order;
if (mapping_empty(mapping))
return;
@@ -347,7 +349,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
* start of the range and 'partial_end' at the end of the range.
* Note that 'end' is exclusive while 'lend' is inclusive.
*/
- start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = (lstart + (nrpages * PAGE_SIZE) - 1) >> PAGE_SHIFT;
+ start = round_down(start, nrpages);
+
if (lend == -1)
/*
* lend == -1 indicates end-of-file so we have to set 'end'
@@ -356,7 +360,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
*/
end = -1;
else
- end = (lend + 1) >> PAGE_SHIFT;
+ end = round_down((lend + 1) >> PAGE_SHIFT, nrpages);
folio_batch_init(&fbatch);
index = start;
@@ -372,8 +376,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
cond_resched();
}
- same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
- folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
+ same_folio = round_down(lstart >> PAGE_SHIFT, nrpages) ==
+ round_down(lend >> PAGE_SHIFT, nrpages);
+	folio = __filemap_get_folio(mapping, round_down(lstart >> PAGE_SHIFT, nrpages), FGP_LOCK, 0);
if (!IS_ERR(folio)) {
same_folio = lend < folio_pos(folio) + folio_size(folio);
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
@@ -387,7 +392,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
}
if (!same_folio) {
- folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
+ folio = __filemap_get_folio(mapping,
+ round_down(lend >> PAGE_SHIFT, nrpages),
FGP_LOCK, 0);
if (!IS_ERR(folio)) {
if (!truncate_inode_partial_folio(folio, lstart, lend))
@@ -497,15 +503,18 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
+ unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int nrpages = 1U << min_order;
pgoff_t indices[PAGEVEC_SIZE];
struct folio_batch fbatch;
- pgoff_t index = start;
+ pgoff_t index = round_up(start, nrpages);
+	pgoff_t end_idx = round_down(end + 1, nrpages) - 1;
unsigned long ret;
unsigned long count = 0;
int i;
folio_batch_init(&fbatch);
- while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
+ while (find_lock_entries(mapping, &index, end_idx, &fbatch, indices)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
@@ -618,9 +627,11 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
+ unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int nrpages = 1U << min_order;
pgoff_t indices[PAGEVEC_SIZE];
struct folio_batch fbatch;
- pgoff_t index;
+ pgoff_t index, end_idx;
int i;
int ret = 0;
int ret2 = 0;
@@ -630,8 +641,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
return 0;
folio_batch_init(&fbatch);
- index = start;
- while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
+ index = round_up(start, nrpages);
+	end_idx = round_down(end + 1, nrpages) - 1;
+ while (find_get_entries(mapping, &index, end_idx, &fbatch, indices)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
@@ -660,6 +672,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) < min_order, folio);
+ VM_BUG_ON_FOLIO(folio->index & (nrpages - 1), folio);
folio_wait_writeback(folio);
if (folio_mapped(folio))
--
2.40.1