Since VM_MAX_READAHEAD is greatly enlarged and the read-ahead algorithm is
more complex, it becomes necessary to insert cond_resched() calls into the
read-ahead path.
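
For reference, all of the hunks below apply the same pattern: a voluntary
preemption point right before each pagevec flush.  A minimal illustrative
sketch of that pattern (simplified; the helper name is made up, the called
functions are the kernel APIs actually touched by this patch):

	#include <linux/pagevec.h>	/* pagevec_add(), __pagevec_lru_add() */
	#include <linux/sched.h>	/* cond_resched() */

	static void lru_add_batched(struct pagevec *pvec, struct page *page)
	{
		if (!pagevec_add(pvec, page)) {		/* pagevec just became full */
			cond_resched();			/* voluntary preemption point */
			__pagevec_lru_add(pvec);	/* drain the batch onto the LRU */
		}
	}

Placing cond_resched() at the flush point means the reschedule check runs
once per pagevec-sized batch rather than once per page, which keeps the
overhead low while still bounding scheduling latency.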
If desktop users still experience audio jitter with the new read-ahead code,
please try one of the following ways to eliminate it:
1) compile the kernel with CONFIG_PREEMPT_VOLUNTARY or CONFIG_PREEMPT
2) reduce the read-ahead request size by running
blockdev --setra 256 /dev/hda # or whatever device you are using
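   (The current setting can be inspected with "blockdev --getra /dev/hda";
   the value is in 512-byte sectors, so 256 corresponds to 128 KB.)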
Signed-off-by: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
--- linux-2.6.19-rc5-mm2.orig/fs/mpage.c
+++ linux-2.6.19-rc5-mm2/fs/mpage.c
@@ -407,8 +407,10 @@ mpage_readpages(struct address_space *ma
&last_block_in_bio, &map_bh,
&first_logical_block,
get_block);
- if (!pagevec_add(&lru_pvec, page))
+ if (!pagevec_add(&lru_pvec, page)) {
+ cond_resched();
__pagevec_lru_add(&lru_pvec);
+ }
} else {
page_cache_release(page);
}
--- linux-2.6.19-rc5-mm2.orig/mm/readahead.c
+++ linux-2.6.19-rc5-mm2/mm/readahead.c
@@ -182,8 +182,10 @@ int read_cache_pages(struct address_spac
continue;
}
ret = filler(data, page);
- if (!pagevec_add(&lru_pvec, page))
+ if (!pagevec_add(&lru_pvec, page)) {
+ cond_resched();
__pagevec_lru_add(&lru_pvec);
+ }
if (ret) {
read_cache_pages_invalidate_pages(mapping, pages);
break;
@@ -216,8 +218,10 @@ static int read_pages(struct address_spa
if (!add_to_page_cache(page, mapping,
page->index, GFP_KERNEL)) {
mapping->a_ops->readpage(filp, page);
- if (!pagevec_add(&lru_pvec, page))
+ if (!pagevec_add(&lru_pvec, page)) {
+ cond_resched();
__pagevec_lru_add(&lru_pvec);
+ }
} else
page_cache_release(page);
}
@@ -330,6 +334,7 @@ __do_page_cache_readahead(struct address
read_unlock_irq(&mapping->tree_lock);
page = page_cache_alloc_cold(mapping);
+ cond_resched();
read_lock_irq(&mapping->tree_lock);
if (!page)
break;
--