Readahead policy for reading backward.

The sequential readahead logic never triggers for a backward reader, so
every one of its reads is served synchronously. Detect descending read
offsets (a request starting at or before ->prev_page) and prefetch a
window that covers the request and extends further back toward the file
head, growing the window on each successive backward read.
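
As an illustration of the targeted access pattern (this demo program is
not part of the patch; the file argument handling and the 4KB page size
are arbitrary), a reader that scans a file one page at a time from tail
to head:

#include <fcntl.h>
#include <unistd.h>

#define PAGE_SZ 4096

int main(int argc, char **argv)
{
        char buf[PAGE_SZ];
        off_t end, pos;
        int fd;

        if (argc < 2)
                return 1;

        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        end = lseek(fd, 0, SEEK_END);

        /* read the file backward, one page per request */
        for (pos = end - end % PAGE_SZ; pos >= 0; pos -= PAGE_SZ)
                if (pread(fd, buf, sizeof(buf), pos) < 0)
                        break;

        close(fd);
        return 0;
}
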
Signed-off-by: Wu Fengguang <[email protected]>
---
 mm/readahead.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
--- linux-2.6.17-rc4-mm3.orig/mm/readahead.c
+++ linux-2.6.17-rc4-mm3/mm/readahead.c
@@ -1574,6 +1574,56 @@ initial_readahead(struct address_space *
}
/*
+ * Backward prefetching.
+ *
+ * No look-ahead and no thrashing safety guard: neither should be needed here.
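+ *
+ * Example (illustrative numbers): with readahead_hit_rate == 1, a first
+ * backward read at page 90 following ->prev_page == 99 doubles an
+ * 8-page request to 16 pages and prefetches pages [83, 99).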
+ */
+static int
+try_read_backward(struct file_ra_state *ra, pgoff_t begin_index,
+ unsigned long ra_size, unsigned long ra_max)
+{
+ pgoff_t end_index;
+
+ /* Are we reading backward? */
+ if (begin_index > ra->prev_page)
+ return 0;
+
+ if ((ra->flags & RA_CLASS_MASK) == RA_CLASS_BACKWARD &&
+ ra_has_index(ra, ra->prev_page)) {
+ /* Established backward pattern: widen by the recent cache-hit count. */
+ ra_size += 2 * ra_cache_hit(ra, 0);
+ end_index = ra->la_index;
+ } else {
+ /* New backward pattern: grow the request (2x when readahead_hit_rate is 1). */
+ ra_size += ra_size + ra_size * (readahead_hit_rate - 1) / 2;
+ end_index = ra->prev_page;
+ }
+
+ if (ra_size > ra_max)
+ ra_size = ra_max;
+
+ /* Read traces close enough to be covered by the prefetching? */
+ if (end_index > begin_index + ra_size)
+ return 0;
+
+ /* Clamp to the file head: end_index - ra_size would underflow pgoff_t. */
+ if (end_index < ra_size)
+ ra_size = end_index;
+
+ begin_index = end_index - ra_size;
+
+ ra_set_class(ra, RA_CLASS_BACKWARD);
+ ra_set_index(ra, begin_index, begin_index);
+ ra_set_size(ra, ra_size, 0);
+
+ return 1;
+}
+
+/*
* ra_min is mainly determined by the size of cache memory. Reasonable?
*
* Table of concrete numbers for 4KB page size:
--