Readahead policy after thrashing.
It tries to recover gracefully from thrashing: the next read-ahead window
is sized from the observed thrashing distance, scaled down to 3/4 of it to
be conservative, and capped at ra_max.
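
For illustration only, here is a minimal stand-alone user-space sketch of
the window-sizing rule applied by the new thrashing_recovery_readahead()
below.  The sketch is not part of the patch: thrash_recovery_size() and the
sample numbers are invented for the example, and the kernel-side
bookkeeping (update_ra_thrash_bytes(), ra_set_*(), ra_submit()) is omitted.

/*
 * Stand-alone sketch (NOT kernel code) of the thrashing-recovery
 * window sizing.  All names here are made up for the example.
 */
#include <stdio.h>

static unsigned long thrash_recovery_size(unsigned long offset,
					  unsigned long ra_index,
					  unsigned long ra_max)
{
	unsigned long ra_size;

	if (offset < ra_index) {
		/* The miss falls inside the old, partly lost chunk:
		 * re-read everything up to where the new chunk begins. */
		ra_size = ra_index - offset;
	} else {
		/* The distance covered before thrashing is the observed
		 * thrashing threshold; back off to 3/4 of it. */
		ra_size = (offset - ra_index) * 3 / 4;
	}

	if (ra_size > ra_max)
		ra_size = ra_max;
	return ra_size;
}

int main(void)
{
	/* Thrashing detected 256 pages past ra_index with a 1024-page
	 * ceiling: the next window is 192 pages. */
	printf("%lu\n", thrash_recovery_size(1256, 1000, 1024));
	return 0;
}

The 3/4 back-off keeps the next window below the just-measured thrashing
distance, so the recovered chunk is less likely to be evicted again before
it is consumed.
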
Signed-off-by: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
--- linux-2.6.19-rc5-mm2.orig/mm/readahead.c
+++ linux-2.6.19-rc5-mm2/mm/readahead.c
@@ -1522,6 +1522,50 @@ try_backward_prefetching(struct file_ra_
 }
 
 /*
+ * Readahead thrashing recovery.
+ */
+static unsigned long
+thrashing_recovery_readahead(struct address_space *mapping,
+			struct file *filp, struct file_ra_state *ra,
+			pgoff_t offset, unsigned long ra_max)
+{
+	unsigned long ra_size;
+
+#ifdef CONFIG_DEBUG_READAHEAD
+	if (probe_page(mapping, offset - 1))
+		ra_account(ra, RA_EVENT_READAHEAD_MUTILATE,
+				ra->readahead_index - offset);
+	ra_account(ra, RA_EVENT_READAHEAD_THRASHING,
+			ra->readahead_index - offset);
+#endif
+
+	/*
+	 * Thrashing may occur in (ra_index, la_index], in which case the
+	 * old read-ahead chunk is lost soon after the new one is allocated.
+	 * Ensure that we recover all needed pages in the old chunk.
+	 */
+	if (offset < ra->ra_index)
+		ra_size = ra->ra_index - offset;
+	else {
+		/* After thrashing, we know the exact thrashing-threshold. */
+		ra_size = offset - ra->ra_index;
+		update_ra_thrash_bytes(mapping->backing_dev_info, ra_size);
+
+		/* And we'd better be a bit conservative. */
+		ra_size = ra_size * 3 / 4;
+	}
+
+	if (ra_size > ra_max)
+		ra_size = ra_max;
+
+	ra_set_class(ra, RA_CLASS_THRASHING);
+	ra_set_index(ra, offset, offset);
+	ra_set_size(ra, ra_size, ra_size / LOOKAHEAD_RATIO);
+
+	return ra_submit(ra, mapping, filp);
+}
+
+/*
  * ra_min is mainly determined by the size of cache memory. Reasonable?
  *
  * Table of concrete numbers for 4KB page size:
--