- Enlarge VM_MAX_READAHEAD to 1024 if the new read-ahead code is compiled in.
  This value is no longer tightly coupled with the thrashing problem, and
  therefore no longer constrained by it.  The adaptive read-ahead logic merely
  takes it as an upper bound, and will not stick to it under memory pressure.
- Slightly enlarge the minimal/initial read-ahead size on systems with plenty
  of memory.  Memory-abundant systems are less likely to suffer thrashing even
  at small read-ahead sizes, and a bigger initial value helps ra_size ramp up
  more quickly (see the sketch below for how the bounds scale with memory).
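As a rough illustration (a userspace sketch, not kernel code), the program
below mimics the ra_min/ra_max scaling implemented by get_readahead_bounds()
in this patch.  It assumes 4KB pages, MIN_RA_PAGES of 4 pages (16KB, matching
VM_MIN_READAHEAD) and ra_pages of 256 pages (the new 1024KB VM_MAX_READAHEAD);
with those assumptions it prints the same ra_min numbers as the table in the
readahead.c comment:

/*
 * Illustrative userspace sketch, not kernel code: it mimics the
 * ra_min/ra_max scaling of get_readahead_bounds().
 * Assumptions: 4KB pages, MIN_RA_PAGES = 4 pages (16KB, per
 * VM_MIN_READAHEAD), ra_pages = 256 pages (the new 1024KB maximum).
 */
#include <stdio.h>

#define PAGE_SIZE_BYTES	4096UL
#define MIN_RA_PAGES	4UL
#define RA_PAGES	256UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long mb;

	for (mb = 4; mb <= 1024; mb *= 2) {
		/* "reclaimable" stands for inactive + free pages */
		unsigned long reclaimable = mb * 1024 * 1024 / PAGE_SIZE_BYTES;
		unsigned long ra_max = min_ul(min_ul(RA_PAGES, 0xFFFFUL),
					      reclaimable / 2);
		unsigned long ra_min = min_ul(min_ul(MIN_RA_PAGES +
						     (reclaimable >> 14),
						     (64 * 1024 +
						      PAGE_SIZE_BYTES - 1) /
						     PAGE_SIZE_BYTES),
					      ra_max / 8);

		printf("inactive+free %4lu MB: ra_min %3lu KB, ra_max %4lu KB\n",
		       mb,
		       ra_min * PAGE_SIZE_BYTES / 1024,
		       ra_max * PAGE_SIZE_BYTES / 1024);
	}
	return 0;
}

The 64KB ceiling comes from the DIV_ROUND_UP(64*1024, PAGE_CACHE_SIZE) term,
so ra_min stops growing once inactive + free memory exceeds roughly 1GB.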
Signed-off-by: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
--- linux-2.6.19-rc5-mm2.orig/include/linux/mm.h
+++ linux-2.6.19-rc5-mm2/include/linux/mm.h
@@ -1046,7 +1046,11 @@ extern int filemap_populate(struct vm_ar
 int write_one_page(struct page *page, int wait);
 
 /* readahead.c */
+#ifdef CONFIG_ADAPTIVE_READAHEAD
+#define VM_MAX_READAHEAD	1024	/* kbytes */
+#else
 #define VM_MAX_READAHEAD	128	/* kbytes */
+#endif
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 #define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
					 * turning readahead off */
--- linux-2.6.19-rc5-mm2.orig/mm/readahead.c
+++ linux-2.6.19-rc5-mm2/mm/readahead.c
@@ -814,6 +814,30 @@ out:
 	return nr_pages;
 }
 
+/*
+ * ra_min is mainly determined by the size of cache memory. Reasonable?
+ *
+ * Table of concrete numbers for 4KB page size:
+ * inactive + free (MB):    4    8   16   32   64  128  256  512 1024
+ * ra_min (KB):             16   16   16   16   20   24   32   48   64
+ */
+static inline void get_readahead_bounds(struct file_ra_state *ra,
+					unsigned long *ra_min,
+					unsigned long *ra_max)
+{
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long free;
+
+	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
+
+	free += inactive;
+	*ra_max = min(min(ra->ra_pages, 0xFFFFUL), free / 2);
+	*ra_min = min(min(MIN_RA_PAGES + (free >> 14),
+			  DIV_ROUND_UP(64*1024, PAGE_CACHE_SIZE)),
+		      *ra_max / 8);
+}
+
 #endif /* CONFIG_ADAPTIVE_READAHEAD */
 
 /*
--