Hi,

I found a situation in which the OOM killer is triggered easily, and
I would like to report it.
I tried to fix this problem so that the OOM killer is triggered as
rarely as possible, and as a result I made a reference patch for it.

Any comments are welcome.
(Comments proposing a much simpler or fundamentally different approach
are especially welcome.)
------------------------------------------------------------------------------
The OOM killer is triggered easily if the following conditions are met:
(1) A quarter of the sum of the total log sizes of all filesystems that
    use jbd exceeds the size of the Normal zone.
(2) A huge amount of data containing a lot of metadata is committed to
    each filesystem, and then all commits to them stop.
    For example, a process creates many huge files with a large number
    of indirect blocks, and then all processes stop doing I/O to every
    filesystem that uses jbd.
(3) After (2), a large memory allocation is requested.
(NOTE: the OOM killer is triggered especially easily on x86 systems,
because an x86 system can only have a Normal zone smaller than 1GB.)
The reason is that jbd does not proactively release journal heads (jh-s),
even when there are many jh-s that could be released.

jh-s are released only at the following points:
- when the free log space drops to a quarter of the total log size
  (log_do_checkpoint())
- when a transaction begins to commit (__journal_clean_checkpoint_list(),
  called from journal_commit_transaction())
(NOTE: a jh whose buffer head (bh) is a direct block can also be released
by journal_try_to_free_buffers(), which is called from
try_to_release_page())

Therefore, once the filesystems reach state (2) above, the jh-s remain,
because no new transaction is generated.
However, when system memory is exhausted, try_to_release_page() can be
called, but it cannot release bh-s that hold metadata (indirect blocks
and so on), because the page's mapping is owned by the block device,
not by the filesystem (ext3).

When the mapping is owned by a block device, try_to_release_page() calls
try_to_free_buffers(). That can release ordinary bh-s, but not a bh that
is still referenced by a jh, because that bh's reference counter is
greater than zero. Therefore the jh must be released before the bh can
be released.
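For reference, the check that makes try_to_free_buffers() refuse such a
bh is essentially the buffer_busy() helper in fs/buffer.c, reproduced
here only for illustration:

#include <linux/buffer_head.h>

/*
 * A buffer_head is treated as busy if anyone still holds a reference to
 * it or it is dirty/locked.  A journal head pins its bh via get_bh(), so
 * b_count stays above zero and drop_buffers()/try_to_free_buffers()
 * refuse to free the page's buffers.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
        return atomic_read(&bh->b_count) |
                (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}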
To achieve this, I added a new member function to the buffer head
structure. It releases a bh that belongs to a page whose mapping is the
block device and that carries private data (a journal head).
The function resembles journal_try_to_free_buffers().
I then changed try_to_free_buffers(), which is called from
try_to_release_page(), so that it invokes the new callback before trying
to free the buffers.

As a result, I think the OOM killer becomes harder to trigger than
before, because try_to_free_buffers(), reached via try_to_release_page()
when system memory is exhausted, can now release these bh-s.
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/buffer.c | 23 ++++++++++++++++++++++-
fs/jbd/journal.c | 7 +++++++
fs/jbd/transaction.c | 39 +++++++++++++++++++++++++++++++++++++++
include/linux/buffer_head.h | 7 +++++++
include/linux/jbd.h | 1 +
5 files changed, 76 insertions(+), 1 deletion(-)
diff -Nurp linux-2.6.27-rc8.org/fs/buffer.c linux-2.6.27-rc8/fs/buffer.c
--- linux-2.6.27-rc8.org/fs/buffer.c 2008-09-30 07:24:02.000000000 +0900
+++ linux-2.6.27-rc8/fs/buffer.c 2008-10-17 08:59:37.000000000 +0900
@@ -3111,7 +3111,7 @@ failed:
return 0;
}
-int try_to_free_buffers(struct page *page)
+static int __try_to_free_buffers(struct page *page)
{
struct address_space * const mapping = page->mapping;
struct buffer_head *buffers_to_free = NULL;
@@ -3158,6 +3158,26 @@ out:
}
return ret;
}
+
+int try_to_free_buffers(struct page *page)
+{
+ struct address_space * const mapping = page->mapping;
+ struct buffer_head *head, *bh;
+ const struct buffer_head_operations *bops;
+
+ if (mapping == NULL)
+ return __try_to_free_buffers(page);
+ bh = head = page_buffers(page);
+ do {
+ if ((bops = bh->b_ops) != NULL
+ && !(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)))
+ && atomic_read(&bh->b_count) > 0)
+ if (bops->release_buffer != NULL)
+ bops->release_buffer(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+ return __try_to_free_buffers(page);
+}
EXPORT_SYMBOL(try_to_free_buffers);
void block_sync_page(struct page *page)
@@ -3234,6 +3254,7 @@ struct buffer_head *alloc_buffer_head(gf
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
+ ret->b_ops = NULL;
INIT_LIST_HEAD(&ret->b_assoc_buffers);
get_cpu_var(bh_accounting).nr++;
recalc_bh_state();
diff -Nurp linux-2.6.27-rc8.org/fs/jbd/journal.c linux-2.6.27-rc8/fs/jbd/journal.c
--- linux-2.6.27-rc8.org/fs/jbd/journal.c 2008-09-30 07:24:02.000000000 +0900
+++ linux-2.6.27-rc8/fs/jbd/journal.c 2008-10-17 08:59:37.000000000 +0900
@@ -79,11 +79,16 @@ EXPORT_SYMBOL(journal_wipe);
EXPORT_SYMBOL(journal_blocks_per_page);
EXPORT_SYMBOL(journal_invalidatepage);
EXPORT_SYMBOL(journal_try_to_free_buffers);
+EXPORT_SYMBOL(journal_try_to_free_one_buffer);
EXPORT_SYMBOL(journal_force_commit);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
+static const struct buffer_head_operations jbd_bh_ops = {
+ .release_buffer = journal_try_to_free_one_buffer
+};
+
/*
* Helper function used to manage commit timeouts
*/
@@ -1749,6 +1754,7 @@ repeat:
set_buffer_jbd(bh);
bh->b_private = jh;
jh->b_bh = bh;
+ bh->b_ops = &jbd_bh_ops;
get_bh(bh);
BUFFER_TRACE(bh, "added journal_head");
}
@@ -1803,6 +1809,7 @@ static void __journal_remove_journal_hea
__func__);
jbd_free(jh->b_committed_data, bh->b_size);
}
+ bh->b_ops = NULL;
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
clear_buffer_jbd(bh);
diff -Nurp linux-2.6.27-rc8.org/fs/jbd/transaction.c linux-2.6.27-rc8/fs/jbd/transaction.c
--- linux-2.6.27-rc8.org/fs/jbd/transaction.c 2008-09-30 07:24:02.000000000 +0900
+++ linux-2.6.27-rc8/fs/jbd/transaction.c 2008-10-17 09:11:49.000000000 +0900
@@ -1648,6 +1648,45 @@ out:
return;
}
+void journal_try_to_free_one_buffer(struct buffer_head *bh)
+{
+ journal_t *journal;
+ struct journal_head *jh;
+ const struct buffer_head_operations *bops;
+
+ jh = journal_grab_journal_head(bh);
+ if (!jh)
+ return;
+ if (!jbd_trylock_bh_state(bh)) {
+ journal_put_journal_head(jh);
+ return;
+ }
+ if ((bops = bh->b_ops) == NULL || bops->release_buffer == NULL)
+ goto skip;
+ if (buffer_locked(bh) || buffer_dirty(bh))
+ goto skip;
+ if (jh->b_next_transaction != NULL)
+ goto skip;
+ journal = (jh->b_cp_transaction != NULL ? jh->b_cp_transaction->t_journal: NULL);
+ if (journal != NULL) {
+ if (spin_trylock(&journal->j_list_lock)) {
+ if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
+ /* written-back checkpointed metadata buffer */
+ if (jh->b_jlist == BJ_None) {
+ JBUFFER_TRACE(jh, "remove from checkpoint list");
+ __journal_remove_checkpoint(jh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ }
+ }
+ spin_unlock(&journal->j_list_lock);
+ }
+ }
+skip:
+ journal_put_journal_head(jh);
+ jbd_unlock_bh_state(bh);
+}
+
/*
* journal_try_to_free_buffers() could race with journal_commit_transaction()
* The latter might still hold the a count on buffers when inspecting
diff -Nurp linux-2.6.27-rc8.org/include/linux/buffer_head.h linux-2.6.27-rc8/include/linux/buffer_head.h
--- linux-2.6.27-rc8.org/include/linux/buffer_head.h 2008-09-30 07:24:02.000000000 +0900
+++ linux-2.6.27-rc8/include/linux/buffer_head.h 2008-10-17 08:59:37.000000000 +0900
@@ -43,6 +43,10 @@ enum bh_state_bits {
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+struct buffer_head_operations {
+ void (*release_buffer)(struct buffer_head*);
+};
+
struct page;
struct buffer_head;
struct address_space;
@@ -73,6 +77,9 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+ const struct buffer_head_operations *b_ops;
+ /* for a private buffer head
+ to do special work */
};
/*
diff -Nurp linux-2.6.27-rc8.org/include/linux/jbd.h linux-2.6.27-rc8/include/linux/jbd.h
--- linux-2.6.27-rc8.org/include/linux/jbd.h 2008-09-30 07:24:02.000000000 +0900
+++ linux-2.6.27-rc8/include/linux/jbd.h 2008-10-17 08:59:37.000000000 +0900
@@ -890,6 +890,7 @@ extern void journal_sync_buffer (struct
extern void journal_invalidatepage(journal_t *,
struct page *, unsigned long);
extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern void journal_try_to_free_one_buffer(struct buffer_head *);
extern int journal_stop(handle_t *);
extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *);
On Fri, 17 Oct 2008 22:37:16 +0900 (JST)
Toshiyuki Okajima <[email protected]> wrote:
> Hi,
>
> I found a situation in which the OOM killer is triggered easily, and
> I would like to report it.
> I tried to fix this problem so that the OOM killer is triggered as
> rarely as possible, and as a result I made a reference patch for it.
>
> Any comments are welcome.
> (Comments proposing a much simpler or fundamentally different approach
> are especially welcome.)
>
> ------------------------------------------------------------------------------
>
> The OOM killer is triggered easily if the following conditions are met:
> (1) A quarter of the sum of the total log sizes of all filesystems that
>     use jbd exceeds the size of the Normal zone.
> (2) A huge amount of data containing a lot of metadata is committed to
>     each filesystem, and then all commits to them stop.
>     For example, a process creates many huge files with a large number
>     of indirect blocks, and then all processes stop doing I/O to every
>     filesystem that uses jbd.
> (3) After (2), a large memory allocation is requested.
> (NOTE: the OOM killer is triggered especially easily on x86 systems,
> because an x86 system can only have a Normal zone smaller than 1GB.)
>
> The reason is that jbd does not proactively release journal heads (jh-s),
> even when there are many jh-s that could be released.
>
> jh-s are released only at the following points:
> - when the free log space drops to a quarter of the total log size
>   (log_do_checkpoint())
> - when a transaction begins to commit (__journal_clean_checkpoint_list(),
>   called from journal_commit_transaction())
> (NOTE: a jh whose buffer head (bh) is a direct block can also be released
> by journal_try_to_free_buffers(), which is called from
> try_to_release_page())
>
> Therefore, once the filesystems reach state (2) above, the jh-s remain,
> because no new transaction is generated.
> However, when system memory is exhausted, try_to_release_page() can be
> called, but it cannot release bh-s that hold metadata (indirect blocks
> and so on), because the page's mapping is owned by the block device,
> not by the filesystem (ext3).
>
> When the mapping is owned by a block device, try_to_release_page() calls
> try_to_free_buffers(). That can release ordinary bh-s, but not a bh that
> is still referenced by a jh, because that bh's reference counter is
> greater than zero. Therefore the jh must be released before the bh can
> be released.
>
> To achieve this, I added a new member function to the buffer head
> structure. It releases a bh that belongs to a page whose mapping is the
> block device and that carries private data (a journal head).
> The function resembles journal_try_to_free_buffers().
> I then changed try_to_free_buffers(), which is called from
> try_to_release_page(), so that it invokes the new callback before trying
> to free the buffers.
>
> As a result, I think the OOM killer becomes harder to trigger than
> before, because try_to_free_buffers(), reached via try_to_release_page()
> when system memory is exhausted, can now release these bh-s.
>
OK.
> ---
> fs/buffer.c | 23 ++++++++++++++++++++++-
> fs/jbd/journal.c | 7 +++++++
> fs/jbd/transaction.c | 39 +++++++++++++++++++++++++++++++++++++++
> include/linux/buffer_head.h | 7 +++++++
> include/linux/jbd.h | 1 +
> 5 files changed, 76 insertions(+), 1 deletion(-)
The patch is fairly complex, and increasing the buffer_head size can be
rather costly. An alternative might be to implement a shrinker
callback function for the journal_head slab cache. Did you consider
this?
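For context, the slab-shrinker interface in question looks roughly like
this on 2.6.27-era kernels; the callback below is only a sketch, and the
bookkeeping it would walk (a global list of checkpointed journal_heads)
is assumed rather than shown:

#include <linux/mm.h>           /* struct shrinker, register_shrinker() */

/*
 * Sketch of a journal_head shrinker.  The callback is asked to scan up
 * to 'nr' objects and returns (an estimate of) how many freeable objects
 * remain, or -1 if it cannot run under the given gfp_mask.
 */
static int jh_shrink(int nr, gfp_t gfp_mask)
{
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                /* ... walk checkpointed jh-s and release up to nr ... */
        }
        return 0;       /* would return the current count of freeable jh-s */
}

static struct shrinker jh_shrinker = {
        .shrink = jh_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* registered once, e.g. at journal init: register_shrinker(&jh_shrinker); */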
Hi Andrew.
Thank you for your comment.
Andrew Morton wrote:
> On Fri, 17 Oct 2008 22:37:16 +0900 (JST)
> Toshiyuki Okajima <[email protected]> wrote:
>
>> Hi,
>>
>> I found a situation in which the OOM killer is triggered easily, and
>> I would like to report it.
>> I tried to fix this problem so that the OOM killer is triggered as
>> rarely as possible, and as a result I made a reference patch for it.
<SNIP>
> OK.
>
>> ---
>> fs/buffer.c | 23 ++++++++++++++++++++++-
>> fs/jbd/journal.c | 7 +++++++
>> fs/jbd/transaction.c | 39 +++++++++++++++++++++++++++++++++++++++
>> include/linux/buffer_head.h | 7 +++++++
>> include/linux/jbd.h | 1 +
>> 5 files changed, 76 insertions(+), 1 deletion(-)
>
> The patch is fairly complex, and increasing the buffer_head size can be
Yes.
Applying this fix increases the buffer_head size from 56 bytes to
60 bytes on x86 systems.
As a result, the maximum number of buffer heads per slab drops from 64
to 63 (a loss of less than 2%).
Therefore I think this change hardly influences system performance.
And I would rather add the new member because I think it will be useful
not only for this fix but also in the future.
> rather costly. An alternative might be to implement a shrinker
> callback function for the journal_head slab cache. Did you consider
> this?
Yes.
But if we implement a shrinker, an unused list and counters are needed
to manage the shrink targets ("journal heads").
I thought that comparatively large code changes to jbd would be
necessary to accomplish that.
However, I will try it.
Best Regards,
Toshiyuki Okajima
Hi Andrew.
> > rather costly. An alternative might be to implement a shrinker
> > callback function for the journal_head slab cache. Did you consider
> > this?
> Yes.
> But if we implement a shrinker, an unused list and counters are needed
> to manage the shrink targets ("journal heads").
> I thought that comparatively large code changes to jbd would be
> necessary to accomplish that.
> However, I will try it.
I managed to build a shrinker callback for the journal_head slab cache.
The code is smaller than before, but its logic seems more complex.
I haven't hit any trouble while running some light load tests on the
patched kernel, but I think the system may hang if several journal_head
shrinkers run concurrently.
So I will try again to build a more appropriate fix.
Please give me comments if you have a better idea.
------------------------------------------------------------------------------
Direct data blocks can be released by the releasepage() method of their
mapping (they belong to the mapping of their filesystem inode).
The indirect data blocks (ext3), on the other hand, can only be released
via try_to_free_buffers(), because their mapping belongs to the block
device, and a block device has no releasepage() method of its own.
But try_to_free_buffers() is a generic function that releases
buffer_heads, and it cannot release a buffer_head that carries private
data (such as a journal_head), because that buffer_head's reference
counter is greater than zero.
Therefore, a buffer_head cannot be released by try_to_free_buffers()
even when its private data could be released.
As a result, the OOM killer may be triggered when system memory is
exhausted, even though a lot of private data could be released.
To resolve this situation, a shrinker for journal_heads is required.
The shrinker was written by following logic such as
shrink_icache_memory().
In order to shrink journal_heads, it is necessary to maintain a list of
journal_heads waiting to be checkpointed across all filesystems using
jbd.
The new list is operated on at the following points:
- when a journal_head is added to a checkpoint list, it is also added
  to the global checkpoint list (the new list);
- when a journal_head is removed from a checkpoint list, it is also
  removed from the global checkpoint list (the new list);
- while the shrinker is running: the shrinker scans only the required
  number of journal_heads on the new list and releases those that can
  be released.
Therefore the OOM killer becomes harder to trigger than before.
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/jbd/checkpoint.c | 77 +++++++++++++++++++++++++++++++++++++++++++
fs/jbd/journal.c | 2 +
include/linux/journal-head.h | 7 +++
3 files changed, 86 insertions(+)
diff -Nurp linux-2.6.27.1.org/fs/jbd/checkpoint.c linux-2.6.27.1/fs/jbd/checkpoint.c
--- linux-2.6.27.1.org/fs/jbd/checkpoint.c 2008-10-16 08:02:53.000000000 +0900
+++ linux-2.6.27.1/fs/jbd/checkpoint.c 2008-10-23 15:07:14.000000000 +0900
@@ -24,6 +24,14 @@
#include <linux/slab.h>
/*
+ * Used for shrinking journal_heads whose I/O are completed
+ */
+static DEFINE_SPINLOCK(jbd_global_lock);
+static LIST_HEAD(jbd_checkpoint_list);
+static int jbd_jh_cache_pressure = 10;
+static int jbd_nr_checkpoint_jh = 0;
+
+/*
* Unlink a buffer from a transaction checkpoint list.
*
* Called with j_list_lock held.
@@ -595,6 +603,10 @@ int __journal_remove_checkpoint(struct j
__buffer_unlink(jh);
jh->b_cp_transaction = NULL;
+ spin_lock(&jbd_global_lock);
+ list_del_init(&jh->b_checkpoint_list);
+ jbd_nr_checkpoint_jh--;
+ spin_unlock(&jbd_global_lock);
if (transaction->t_checkpoint_list != NULL ||
transaction->t_checkpoint_io_list != NULL)
@@ -655,8 +667,73 @@ void __journal_insert_checkpoint(struct
jh->b_cpnext->b_cpprev = jh;
}
transaction->t_checkpoint_list = jh;
+ spin_lock(&jbd_global_lock);
+ list_add(&jh->b_checkpoint_list, &jbd_checkpoint_list);
+ jbd_nr_checkpoint_jh++;
+ spin_unlock(&jbd_global_lock);
+}
+
+static void try_to_free_cp_buf(journal_t *journal, transaction_t *transaction, struct journal_head *jh)
+{
+ transaction_t *transaction2;
+
+ spin_lock(&journal->j_list_lock);
+ if (!list_empty(&jh->b_checkpoint_list)) {
+ transaction2 = jh->b_cp_transaction;
+ BUG_ON(transaction2 == NULL);
+ if (transaction == transaction2) {
+ jbd_lock_bh_state(jh2bh(jh));
+ __try_to_free_cp_buf(jh);
+ }
+ }
+ spin_unlock(&journal->j_list_lock);
}
+static void prune_jbd_jhcache(int nr)
+{
+ struct journal_head *jh;
+ struct list_head *tmp;
+ journal_t *journal;
+ transaction_t *transaction;
+
+ BUG_ON(nr < 0);
+ for (; nr; nr--) {
+ spin_lock(&jbd_global_lock);
+ if ((tmp = jbd_checkpoint_list.prev) == &jbd_checkpoint_list) {
+ spin_unlock(&jbd_global_lock);
+ break;
+ }
+ list_move(tmp, &jbd_checkpoint_list);
+ jh = list_entry(tmp, struct journal_head, b_checkpoint_list);
+ /* Protect a jh from being removed while operating */
+ journal_grab_journal_head(jh2bh(jh));
+ transaction = jh->b_cp_transaction;
+ BUG_ON(transaction == NULL);
+ journal = transaction->t_journal;
+ spin_unlock(&jbd_global_lock);
+ /* Releasing a jh from checkpoint list if possible */
+ try_to_free_cp_buf(journal, transaction, jh);
+ /* For previous count up (actually releasing a jh here) */
+ journal_put_journal_head(jh);
+ cond_resched();
+ }
+}
+
+static int shrink_jbd_jhcache_memory(int nr, gfp_t gfp_mask)
+{
+ if (nr) {
+ if (!(gfp_mask & __GFP_FS))
+ return -1;
+ prune_jbd_jhcache(nr);
+ }
+ return (jbd_nr_checkpoint_jh*100)/jbd_jh_cache_pressure;
+}
+
+struct shrinker jbd_jh_shrinker = {
+ .shrink = shrink_jbd_jhcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
/*
* We've finished with this transaction structure: adios...
*
diff -Nurp linux-2.6.27.1.org/fs/jbd/journal.c linux-2.6.27.1/fs/jbd/journal.c
--- linux-2.6.27.1.org/fs/jbd/journal.c 2008-10-16 08:02:53.000000000 +0900
+++ linux-2.6.27.1/fs/jbd/journal.c 2008-10-23 15:00:44.000000000 +0900
@@ -1890,6 +1890,7 @@ static inline void jbd_remove_debugfs_en
#endif
+extern struct shrinker jbd_jh_shrinker;
struct kmem_cache *jbd_handle_cache;
static int __init journal_init_handle_cache(void)
@@ -1903,6 +1904,7 @@ static int __init journal_init_handle_ca
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
}
+ register_shrinker(&jbd_jh_shrinker);
return 0;
}
diff -Nurp linux-2.6.27.1.org/include/linux/journal-head.h linux-2.6.27.1/include/linux/journal-head.h
--- linux-2.6.27.1.org/include/linux/journal-head.h 2008-10-16 08:02:53.000000000 +0900
+++ linux-2.6.27.1/include/linux/journal-head.h 2008-10-23 15:00:44.000000000 +0900
@@ -87,6 +87,13 @@ struct journal_head {
* [j_list_lock]
*/
struct journal_head *b_cpnext, *b_cpprev;
+
+ /*
+ * Checkpoint journal head list
+ * all over filesystems with jbd in order to shrink.
+ * [jbd_global_lock]
+ */
+ struct list_head b_checkpoint_list;
};
#endif /* JOURNAL_HEAD_H_INCLUDED */
(added linux-fsdevel)
On Thu, 23 Oct 2008 17:41:01 +0900
Toshiyuki Okajima <[email protected]> wrote:
> Hi Andrew.
>
> > > rather costly. An alternative might be to implement a shrinker
> > > callback function for the journal_head slab cache. Did you consider
> > > this?
> > Yes.
> > But if we implement a shrinker, an unused list and counters are needed
> > to manage the shrink targets ("journal heads").
> > I thought that comparatively large code changes to jbd would be
> > necessary to accomplish that.
>
> > However, I will try it.
>
> I managed to build a shrinker callback for the journal_head slab cache.
> The code is smaller than before, but its logic seems more complex.
> I haven't hit any trouble while running some light load tests on the
> patched kernel, but I think the system may hang if several journal_head
> shrinkers run concurrently.
> So I will try again to build a more appropriate fix.
yeah, that's not very pretty either, is it?
> Please give me comments if you have a better idea.
Stepping back a bit...
The basic problem is, I believe, that some client of the blockdev
(ext3) is adding metadata to the blockdev's data structures
(buffer_heads) but we have no means by which the blockdev code can call
back into that client requesting that the metadata be released, yes?
We can fix the problem which you've identified by adding a means for
the blockdev code (def_blk_aops.releasepage()) to call back into ext3,
yes?
If so, how do we do that?
I seem to recall that there's code somewhere in the tree which does
things like taking a copy of bdev->address_space_operations and
reinstalling that, and overwriting selected fields, and then arranging
somehow for the old value to be reinstalled when the client releases
the blockdev. That's plain nasty.
Perhaps what we could do is to add a new
	blkdev_register_releasepage(struct block_device *,
				    int (*)(struct page *, gfp_t))
function and call that from within ext3 initialisation. (This could be
a block_device_operations entry, but is there any point in doing that?)
Within blkdev_register_releasepage(), record the address of that
function in the `struct block_device' (with what locking??) and then
implement def_blk_aops.releasepage(), which calls
bdev->registered_releasepage(). Set def_blk_aops.releasepage() to point
at try_to_free_buffers() to provide the default behaviour.
Then we'd need a blkdev_unregister_releasepage() which restores the old
value. Or, better, make blkdev_register_releasepage()
return the old value and require that clients of the blockdev (ie:
ext3) restore the old value prior to releasing the blockdev.
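A very rough sketch of what I mean (the bd_releasepage field and both
function bodies below are purely illustrative, and the locking question
above is left open):

#include <linux/fs.h>           /* struct block_device, I_BDEV() */
#include <linux/buffer_head.h>  /* try_to_free_buffers() */

typedef int (*releasepage_fn)(struct page *, gfp_t);

/*
 * Assumes a hypothetical field is added to struct block_device:
 *      releasepage_fn bd_releasepage;
 */
releasepage_fn blkdev_register_releasepage(struct block_device *bdev,
                                           releasepage_fn releasepage)
{
        releasepage_fn old = bdev->bd_releasepage;

        bdev->bd_releasepage = releasepage;
        return old;     /* the client restores this before releasing the bdev */
}

static int blkdev_releasepage(struct page *page, gfp_t gfp)
{
        struct block_device *bdev = I_BDEV(page->mapping->host);

        if (bdev->bd_releasepage)
                return bdev->bd_releasepage(page, gfp);
        return try_to_free_buffers(page);       /* default behaviour */
}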
Or something along these lines, anyway..
Hi Andrew.
Thank you for your useful comment.
Andrew Morton wrote:
> (added linux-fsdevel)
>
> On Thu, 23 Oct 2008 17:41:01 +0900
> Toshiyuki Okajima <[email protected]> wrote:
<SNIP>
> > I managed to build a shrinker callback for the journal_head slab cache.
> > The code is smaller than before, but its logic seems more complex.
> > I haven't hit any trouble while running some light load tests on the
> > patched kernel, but I think the system may hang if several journal_head
> > shrinkers run concurrently.
> > So I will try again to build a more appropriate fix.
>
> yeah, that's not very pretty either, is it?
Yes...
I realized that fixing this only within buffer_head or jbd requires more
complex code.
> > Please give me comments if you have a better idea.
> Stepping back a bit...
>
> The basic problem is, I believe, that some client of the blockdev
> (ext3) is adding metadata to the blockdev's data structures
> (buffer_heads) but we have no means by which the blockdev code can call
> back into that client requesting that the metadata be released, yes?
Yes.
> We can fix the problem which you've identified by adding a means for
> the blockdev code (def_blk_aops.releasepage()) to call back into ext3,
> yes?
Yes.
At first, I tried to fix this using only a filesystem-side approach:
- ver.1: a fix within buffer_head
- ver.2: adding a shrinker for journal_heads (to release buffer_heads)
But both approaches lead to complex code.
So we should fix the essence of the problem.
> If so, how do we do that?
>
> I seem to recall that there's code somewhere in the tree which does
> things like taking a copy of bdev->address_space_operations and
> reinstalling that, and overwriting selected fields, and then arranging
> somehow for the old value to be reinstalled when the client releases
> the blockdev. That's plain nasty.
uh-huh.
I will try to fix this problem again using your approach,
blkdev_register_releasepage()/blkdev_unregister_releasepage().
Best Regards,
Toshiyuki Okajima
[abstract]
__log_wait_for_space() may call journal_abort() when all existing
checkpoint transactions have already been released by the journal_head
collectors (other than log_do_checkpoint()).
[details]
The value of journal->j_free is not up to date immediately after
checkpoint transactions are actually released; cleanup_journal_tail()
must be called to bring it up to date. The journal_head collectors can
release not only journal_heads but also whole checkpoint transactions,
yet they do not update journal->j_free (i.e. they do not call
cleanup_journal_tail()). The only exception is log_do_checkpoint(),
which updates journal->j_free by calling cleanup_journal_tail().
Hence the value of journal->j_free seen by __log_space_left() may be
stale after checkpoint transactions have been released by the other
journal_head collectors.
When journal->j_free is stale, jbd tries to release journal_heads by
calling log_do_checkpoint() from __log_wait_for_space(), even though
some checkpoint transactions have in fact already been released.
Therefore, if all checkpoint transactions have already been released by
the journal_head collectors, __log_wait_for_space() calls
journal_abort().
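Concretely, the code path in question in __log_wait_for_space() looks
roughly like this (abridged fragment; before any fix):

        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        nblocks = jbd_space_needed(journal);
        if (__log_space_left(journal) < nblocks) {
                int chkpt = journal->j_checkpoint_transactions != NULL;
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                if (chkpt) {
                        log_do_checkpoint(journal);
                } else {
                        /*
                         * All checkpoint transactions have already been
                         * released by the other collectors, but j_free
                         * is still stale, so the journal is aborted.
                         */
                        printk(KERN_ERR "%s: no transactions\n", __func__);
                        journal_abort(journal, 0);
                }
                spin_lock(&journal->j_state_lock);
        } else {
                spin_unlock(&journal->j_list_lock);
        }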
NOTE: The "journal mode" generates this bug the most easily of the three modes.
Because it is only on the "journal mode" that
journal_try_to_free_buffers() can release a checkpoint transaction.
(Description for ext3:
The direct block which has the filesystem mapping is one of
a checkpoint target on the "journal mode". On the other hand, the direct
block on the "ordered mode" or "writeback mode" is not.)
------------------------------------
journal_head collectors are:
- journal_try_to_free_buffers()
- __journal_clean_checkpoint_list()
- log_do_checkpoint()
------------------------------------
[How to fix]
<now>
The journal_head collectors can remove not only a journal_head but also
a checkpoint transaction.
<changes>
The journal_head collectors (except log_do_checkpoint()) remove only
journal_heads, because they cannot recalculate the value of j_free;
log_do_checkpoint() is the one exception.
(It is difficult to have the collectors recalculate j_free in
__log_wait_for_space(), because updating it requires updating the
journal superblock, which needs some I/O.)
Therefore jbd leaves it to log_do_checkpoint() to release any checkpoint
transaction that the other journal_head collectors have left behind.
As a result, jbd is prevented from hitting the "no transactions" error
in __log_wait_for_space().
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/jbd/checkpoint.c | 25 +++++++++++++++++++++----
fs/jbd/commit.c | 2 +-
fs/jbd/transaction.c | 2 +-
include/linux/jbd.h | 2 +-
4 files changed, 24 insertions(+), 7 deletions(-)
diff -Nurp linux-2.6.28-rc2.org/fs/jbd/checkpoint.c linux-2.6.28-rc2/fs/jbd/checkpoint.c
--- linux-2.6.28-rc2.org/fs/jbd/checkpoint.c 2008-10-27 04:13:29.000000000 +0900
+++ linux-2.6.28-rc2/fs/jbd/checkpoint.c 2008-10-31 19:21:09.000000000 +0900
@@ -96,7 +96,7 @@ static int __try_to_free_cp_buf(struct j
if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
!buffer_dirty(bh) && !buffer_write_io_error(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- ret = __journal_remove_checkpoint(jh) + 1;
+ ret = __journal_remove_checkpoint(jh, false) + 1;
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
@@ -221,7 +221,7 @@ restart:
* Now in whatever state the buffer currently is, we know that
* it has been written out and so we can drop it from the list
*/
- released = __journal_remove_checkpoint(jh);
+ released = __journal_remove_checkpoint(jh, true);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
__brelse(bh);
@@ -287,7 +287,7 @@ static int __process_buffer(journal_t *j
ret = -EIO;
J_ASSERT_JH(jh, !buffer_jbddirty(bh));
BUFFER_TRACE(bh, "remove from checkpoint");
- __journal_remove_checkpoint(jh);
+ __journal_remove_checkpoint(jh, true);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
@@ -366,6 +366,16 @@ restart:
struct journal_head *jh;
int retry = 0, err;
+ /*
+ * Remove an oldest checkpoint transaction only if it has no
+ * journal head.
+ */
+ if (transaction->t_checkpoint_list == NULL
+ && transaction->t_checkpoint_io_list == NULL) {
+ __journal_drop_transaction(journal, transaction);
+ wake_up(&journal->j_wait_logspace);
+ goto out;
+ }
while (!retry && transaction->t_checkpoint_list) {
struct buffer_head *bh;
@@ -614,12 +624,16 @@ out:
*
* The function returns 1 if it frees the transaction, 0 otherwise.
*
+ * can_remove:
+ * false - we don't remove a checkpoint transaction.
+ * true - we remove a checkpoint transaction.
+ *
* This function is called with the journal locked.
* This function is called with j_list_lock held.
* This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
-int __journal_remove_checkpoint(struct journal_head *jh)
+int __journal_remove_checkpoint(struct journal_head *jh, bool can_remove)
{
transaction_t *transaction;
journal_t *journal;
@@ -636,6 +650,9 @@ int __journal_remove_checkpoint(struct j
__buffer_unlink(jh);
jh->b_cp_transaction = NULL;
+ if (!can_remove)
+ goto out;
+
if (transaction->t_checkpoint_list != NULL ||
transaction->t_checkpoint_io_list != NULL)
goto out;
diff -Nurp linux-2.6.28-rc2.org/fs/jbd/commit.c linux-2.6.28-rc2/fs/jbd/commit.c
--- linux-2.6.28-rc2.org/fs/jbd/commit.c 2008-10-27 04:13:29.000000000 +0900
+++ linux-2.6.28-rc2/fs/jbd/commit.c 2008-10-31 18:02:37.000000000 +0900
@@ -833,7 +833,7 @@ restart_loop:
cp_transaction = jh->b_cp_transaction;
if (cp_transaction) {
JBUFFER_TRACE(jh, "remove from old cp transaction");
- __journal_remove_checkpoint(jh);
+ __journal_remove_checkpoint(jh, false);
}
/* Only re-checkpoint the buffer_head if it is marked
diff -Nurp linux-2.6.28-rc2.org/fs/jbd/transaction.c linux-2.6.28-rc2/fs/jbd/transaction.c
--- linux-2.6.28-rc2.org/fs/jbd/transaction.c 2008-10-27 04:13:29.000000000 +0900
+++ linux-2.6.28-rc2/fs/jbd/transaction.c 2008-10-31 18:02:37.000000000 +0900
@@ -1648,7 +1648,7 @@ __journal_try_to_free_buffer(journal_t *
/* written-back checkpointed metadata buffer */
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- __journal_remove_checkpoint(jh);
+ __journal_remove_checkpoint(jh, false);
journal_remove_journal_head(bh);
__brelse(bh);
}
diff -Nurp linux-2.6.28-rc2.org/include/linux/jbd.h linux-2.6.28-rc2/include/linux/jbd.h
--- linux-2.6.28-rc2.org/include/linux/jbd.h 2008-10-27 04:13:29.000000000 +0900
+++ linux-2.6.28-rc2/include/linux/jbd.h 2008-10-31 18:02:37.000000000 +0900
@@ -844,7 +844,7 @@ extern void journal_commit_transaction(j
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *, bool);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
Toshiyuki-san,
I authored a patch a few days ago which I am about to push to Linus,
since two people who reported this problem have confirmed that it
solves the problem for them. That patch can be found here:
http://lkml.org/lkml/2008/11/1/61
As you can see, it has a rather different theory about the root cause
of the problem; but it makes sense to me, and it has empirically solved
the problem.
So I read your proposed description of the root cause of the problem
with interest. If I understand you correctly, your concern is that
various functions in jbd2 are cleaning up the memory associated with
tracking the transactions, thus potentially leaving
journal->j_checkpoint_transactions NULL even though the on-disk
tail of the journal hasn't been updated yet in the jbd superblock.
Your solution to this is to avoid cleaning up the in-memory
representation of the transaction until log_do_checkpoint() has a
chance to clean it up.
Your reasoning and your general diagnosis are sound and I agree with
your observation. However, I disagree with your belief that the
fundamental problem is that journal->j_free is being left "out of
date", and that this is the issue that must be addressed. This is
because your proposed solution of deferring dropping the in-memory
transaction structure has a number of disadvantages. For one, it adds
a lot more code complexity; for another, it means that we are tying
up memory until we have a chance to call log_do_checkpoint().
Therefore, I believe my original strategy of fixing
__log_wait_for_space() is the correct one, since it was a change in
that function which introduced the regression in the first place.
However, your insight that the problem is that cleanup_journal_tail()
can sometimes free up space even if journal->j_checkpoint_transactions
is NULL is very important, and it will be more efficient to try to
call cleanup_journal_tail() before trying to wait on the current
transaction to finish.
So here is my revised patch, which includes your key insight, but
which does not make a large number of changes in other parts of the
jbd code, and which allows transactions to be dropped as soon as we no
longer need to track any buffers associated with them, even though
cleanup_journal_tail() hasn't been called yet.
- Ted
jbd: don't give up looking for space so easily in __log_wait_for_space
From: Theodore Ts'o <[email protected]>
Commit be07c4ed introducd a regression because it assumed that if
there were no transactions ready to be checkpointed, that no progress
could be made on making space available in the journal, and so the
journal should be aborted. This assumption is false; it could be the
case that simply calling cleanup_journal_tail() will recover the
necessary space, or, for small journals, the currently committing
transaction could be responsible for chewing up the required space in
the log, so we need to wait for the currently committing transaction
to finish before trying to force a checkpoint operation.
This patch fixes the bug reported by Meelis Roos at:
http://bugzilla.kernel.org/show_bug.cgi?id=11937
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: Duane Griffin <[email protected]>
---
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 1bd8d4a..5e856de 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
- * outstanding transactions there is nothing to checkpoint and
- * we can't make progress. Abort the journal in this case.
+ * transactions ready to be checkpointed, try to recover
+ * journal space by calling cleanup_journal_tail(), and if
+ * that doesn't work, by waiting for the currently committing
+ * transaction to complete. If there is absolutely no way
+ * to make progress, this is either a BUG or corrupted
+ * filesystem, so abort the journal and leave a stack
+ * trace for forensic evidence.
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
if (__log_space_left(journal) < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
+ int tid = 0;
+ if (journal->j_committing_transaction)
+ tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
if (chkpt) {
log_do_checkpoint(journal);
+ } else if (cleanup_journal_tail(journal) == 0) {
+ /* We were able to recover space; yay! */
+ ;
+ } else if (tid) {
+ log_wait_commit(journal, tid);
} else {
- printk(KERN_ERR "%s: no transactions\n",
- __func__);
+ printk(KERN_ERR "%s: needed %d blocks and "
+ "only had %d space available\n",
+ __func__, nblocks,
+ __log_space_left(journal));
+ printk(KERN_ERR "%s: no way to get more "
+ "journal space\n", __func__);
+ WARN_ON(1);
journal_abort(journal, 0);
}
-
spin_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
Commit 23f8b79e introduced a regression because it assumed that if
there were no transactions ready to be checkpointed, no progress
could be made on making space available in the journal, and so the
journal should be aborted. This assumption is false; it could be the
case that simply calling jbd2_cleanup_journal_tail() will recover the
necessary space, or, for small journals, the currently committing
transaction could be responsible for chewing up the required space in
the log, so we need to wait for the currently committing transaction
to finish before trying to force a checkpoint operation.
This patch fixes a bug reported by Mihai Harpau at:
https://bugzilla.redhat.com/show_bug.cgi?id=469582
This patch fixes a bug reported by François Valenduc at:
http://bugzilla.kernel.org/show_bug.cgi?id=11840
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: Duane Griffin <[email protected]>
Cc: Toshiyuki Okajima <[email protected]>
---
fs/jbd2/checkpoint.c | 28 +++++++++++++++++++++++-----
1 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9203c33..65605e9 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -129,25 +129,43 @@ void __jbd2_log_wait_for_space(journal_t *journal)
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
- * outstanding transactions there is nothing to checkpoint and
- * we can't make progress. Abort the journal in this case.
+ * transactions ready to be checkpointed, try to recover
+ * journal space by calling cleanup_journal_tail(), and if
+ * that doesn't work, by waiting for the currently committing
+ * transaction to complete. If there is absolutely no way
+ * to make progress, this is either a BUG or corrupted
+ * filesystem, so abort the journal and leave a stack
+ * trace for forensic evidence.
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
if (__jbd2_log_space_left(journal) < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
+ int tid = 0;
+ if (journal->j_committing_transaction)
+ tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
+ } else if (jbd2_cleanup_journal_tail(journal) == 0) {
+ /* We were able to recover space; yay! */
+ ;
+ } else if (tid) {
+ jbd2_log_wait_commit(journal, tid);
} else {
- printk(KERN_ERR "%s: no transactions\n",
- __func__);
+ printk(KERN_ERR "%s: needed %d blocks and "
+ "only had %d space available\n",
+ __func__, nblocks,
+ __jbd2_log_space_left(journal));
+ printk(KERN_ERR "%s: no way to get more "
+ "journal space in %s\n", __func__,
+ journal->j_devname);
+ WARN_ON(1);
jbd2_journal_abort(journal, 0);
}
Commit be07c4ed introduced a regression because it assumed that if
there were no transactions ready to be checkpointed, no progress
could be made on making space available in the journal, and so the
journal should be aborted. This assumption is false; it could be the
case that simply calling cleanup_journal_tail() will recover the
necessary space, or, for small journals, the currently committing
transaction could be responsible for chewing up the required space in
the log, so we need to wait for the currently committing transaction
to finish before trying to force a checkpoint operation.
This patch fixes the bug reported by Meelis Roos at:
http://bugzilla.kernel.org/show_bug.cgi?id=11937
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: Duane Griffin <[email protected]>
Cc: Toshiyuki Okajima <[email protected]>
---
fs/jbd/checkpoint.c | 27 ++++++++++++++++++++++-----
1 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 1bd8d4a..5e856de 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
/*
* Test again, another process may have checkpointed while we
* were waiting for the checkpoint lock. If there are no
- * outstanding transactions there is nothing to checkpoint and
- * we can't make progress. Abort the journal in this case.
+ * transactions ready to be checkpointed, try to recover
+ * journal space by calling cleanup_journal_tail(), and if
+ * that doesn't work, by waiting for the currently committing
+ * transaction to complete. If there is absolutely no way
+ * to make progress, this is either a BUG or corrupted
+ * filesystem, so abort the journal and leave a stack
+ * trace for forensic evidence.
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
if (__log_space_left(journal) < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
+ int tid = 0;
+ if (journal->j_committing_transaction)
+ tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
if (chkpt) {
log_do_checkpoint(journal);
+ } else if (cleanup_journal_tail(journal) == 0) {
+ /* We were able to recover space; yay! */
+ ;
+ } else if (tid) {
+ log_wait_commit(journal, tid);
} else {
- printk(KERN_ERR "%s: no transactions\n",
- __func__);
+ printk(KERN_ERR "%s: needed %d blocks and "
+ "only had %d space available\n",
+ __func__, nblocks,
+ __log_space_left(journal));
+ printk(KERN_ERR "%s: no way to get more "
+ "journal space\n", __func__);
+ WARN_ON(1);
journal_abort(journal, 0);
}
Ted-san,
Thank you for reading my mail.
Theodore Tso wrote:
> Toshiyuki-san,
>
> I authored a patch a few days ago which I am about to push to Linus,
> since two people who reported this problem have confirmed that it
> solves the problem for them. That patch can be found here:
>
> http://lkml.org/lkml/2008/11/1/61
I haven't checked it yet, so I will check it now.
> As you can see, it has a rather different theory about the root cause
> of the problem; but it makes sense to me, and it has empirically solved
> the problem.
uh-huh.
> So I read your proposed description of the root cause of the problem
> with interest. If I understand you correctly, your concern is that
> various functions in jbd2 are cleaning up the memory associated with
> tracking the transactions, thus potentially leaving
> journal->j_checkpoint_transactions NULL even though the on-disk
> tail of the journal hasn't been updated yet in the jbd superblock.
Yes.
> Your solution to this is to avoid cleaning up the in-memory
> representation of the transaction until log_do_checkpoint() has a
> chance to clean it up.
Yes.
> Your reasoning and your general diagnosis are sound and I agree with
> your observation. However, I disagree with your belief that the
> fundamental problem is that journal->j_free is being left "out of
> date", and that this is the issue that must be addressed. This is
> because your proposed solution of deferring dropping the in-memory
> transaction structure has a number of disadvantages. For one, it adds
> a lot more code complexity; for another, it means that we are tying
> up memory until we have a chance to call log_do_checkpoint().
Yes, I was concerned about that, but I thought it simpler to release
a checkpoint transaction only in log_do_checkpoint().
However, I agree that jbd should try to release checkpoint transactions
wherever possible.
> Therefore, I believe my original strategy of fixing
> __log_wait_for_space() is the correct one, since it was a change in
> that function which introduced the regression in the first place.
OK.
> However, your insight that the problem is that cleanup_journal_tail()
> can sometimes free up space even if journal->j_checkpoint_transactions
> is NULL is very important, and it will be more efficient to try to
> call cleanup_journal_tail() before trying to wait on the current
> transaction to finish.
I agree with this idea, because it also seems to solve the root cause
that I have been thinking about.
> So here is my revised patch, which includes your key insight, but
> which does not make a large number of changes in other parts of the
> jbd code, and which allows transactions to be dropped as soon as we no
> longer need to track any buffers associated with them, even though
> cleanup_journal_tail() hasn't been called yet.
>
> - Ted
> jbd: don't give up looking for space so easily in __log_wait_for_space
>
> From: Theodore Ts'o <[email protected]>
Your revised patch looks good to me because the new logic is reasonable.
But I think the following needs to be changed from "int tid = 0"
to "tid_t tid = 0":
> int chkpt = journal->j_checkpoint_transactions != NULL;
> + int tid = 0;
And the bug which you already know about also needs to be fixed.
(See "Re: ext3: kernel BUG at fs/jbd/journal.c:412!", which you posted
on Thu, 6 Nov 2008 12:13:22 -0500.)
I tested your revised patch with the "journal mode", and confirmed that
the "no transactions" error no longer happens.
* As you know, this error is triggered most easily by the "journal mode"
  of the three modes.
P.S.
With a vanilla kernel plus your revised patch, another problem can still
occur in place of the "no transactions" error.
It is the problem I have been working on for the past month; my first
attempt to fix it was posted on 17/10/2008 JST as
"[RFC][PATCH] JBD: release checkpoint journal heads through try_to_release_page
when the memory is exhausted".
I found that the "no transactions" error happens easily with the
"journal mode" while I was fixing that problem, so I first needed a
patch which fixes the "no transactions" error: my fix makes
journal_heads be released more efficiently, and that makes the
"no transactions" error easier to hit, not only in the "journal mode"
but also in the "ordered mode" and "writeback mode".
I will post the revised patch which fixes that problem later.
Best Regards,
Toshiyuki Okajima
Hi,
I found that even when a lot of pages could logically be released,
try_to_release_page() cannot release them, so they keep remaining in
memory. This situation makes it easy for the OOM killer to be triggered.
Details of the root cause and my patches which fix it are shown below.
---
Direct data blocks can be released by the releasepage() method of the
mapping of their filesystem inode.
(If an ext3 filesystem owns the inode, ext3_releasepage() is used as
releasepage().)
The indirect data blocks (ext3), on the other hand, can only be released
via try_to_free_buffers(), as is other metadata, because they belong to
the block device's mapping, and a block device has no releasepage()
method of its own.
But try_to_free_buffers() is a generic function which releases
buffer_heads (and the page), and it cannot release a buffer_head that
carries private data (such as a journal_head), because that
buffer_head's reference counter is greater than zero. Therefore
try_to_free_buffers() cannot release such a buffer_head even when it
would be possible to release its private data.
As a result, the OOM killer may be triggered when system memory is
exhausted, even though a lot of private data and its pages could be
released, because try_to_free_buffers() never releases such pages.
In order to solve this situation, we add a callback to the block device
which releases the private data and then the page.
This callback is:
- registered at filesystem initialization time (get_sb_bdev())
- unregistered at filesystem unmount time (kill_block_super())
The callback pointer is stored in the bdev_inode structure, together
with the client which registered it. For a filesystem, the client is
its superblock.
For ext3, this callback can do processing equivalent to
ext3_releasepage() by using the superblock, and the block device's
releasepage() must invoke the callback; therefore the block device's
mapping also needs a releasepage() method.
With these changes it becomes possible to release the private data and
then the page via try_to_release_page().
Therefore the OOM killer becomes harder to trigger than before, because
this patch lets journal_heads be released more efficiently in the ext3
case.
I will post the following patches to solve it (ext3/ext4 versions):
(1) [patch 1/3] vfs: release block-device-mapping buffer_heads which have the
filesystem private data for avoiding oom-killer
(2) [patch 2/3] ext3: release block-device-mapping buffer_heads which have the
filesystem private data for avoiding oom-killer
(3) [patch 3/3] ext4: release block-device-mapping buffer_heads which have the
filesystem private data for avoiding oom-killer
[Additional information]
I have confirmed that JBD on 2.6.28-rc4 with my patch applied can keep
running for a long time under heavy load without triggering the
OOM killer.
(Of course, JBD without the patch cannot keep running for long under the
same conditions.)
* This patch needs Ted's fix posted on "Wed, 5 Nov 2008 09:05:07 -0500"
* as "[PATCH] jbd: don't give up looking for space so easily in
* __log_wait_for_space", because the "no transactions" error is
* triggered easily once my patch releases journal_heads more
* efficiently. Linux 2.6.28-rc4 already includes his patch, so this is
* not a concern.
Any comments are welcome.
Best Regards,
Toshiyuki Okajima
[PATCH 1/3] vfs: release block-device-mapping buffer_heads which have the filesystem
private data for avoiding oom-killer
Implement blkdev_releasepage() to release the buffer_heads and the page
after the client's private data has been released. One such client is a
filesystem. blkdev_releasepage() calls the client's releasepage(), which
is registered via blkdev_register_client_releasepage(), to release that
private data.
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/block_dev.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++
fs/super.c | 22 ++++++++++++++++++
include/linux/fs.h | 9 +++++++
3 files changed, 93 insertions(+)
diff -Nurp linux-2.6.28-rc4.orig/fs/block_dev.c linux-2.6.28-rc4/fs/block_dev.c
--- linux-2.6.28-rc4.orig/fs/block_dev.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/block_dev.c 2008-11-10 18:33:52.000000000 +0900
@@ -29,6 +29,9 @@
struct bdev_inode {
struct block_device bdev;
+ void *client;
+ int (*client_releasepage)(void*, struct page*, gfp_t);
+ rwlock_t client_lock;
struct inode vfs_inode;
};
@@ -260,6 +263,9 @@ static struct inode *bdev_alloc_inode(st
struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
if (!ei)
return NULL;
+ ei->client = NULL;
+ ei->client_releasepage = NULL;
+ rwlock_init(&ei->client_lock);
return &ei->vfs_inode;
}
@@ -1208,6 +1214,61 @@ static long block_ioctl(struct file *fil
return blkdev_ioctl(bdev, mode, cmd, arg);
}
+/*
+ * blkdev_releasepage: execute ei->client_releasepage() if it exists.
+ * Otherwise, execute try_to_free_buffers().
+ * ei->client_releasepage() releases private client's page if possible.
+ * Because a buffer_head's using counter is bigger than 0 if a client has
+ * a page for private usage. If so, try_to_free_buffers() cannot release it.
+ * Therefore a client must try to release a page itself.
+ */
+static int blkdev_releasepage(struct page *page, gfp_t wait)
+{
+ struct bdev_inode *ei = BDEV_I(page->mapping->host);
+ int ret;
+
+ read_lock(&ei->client_lock);
+ if (ei->client_releasepage != NULL)
+ ret = (*ei->client_releasepage)(ei->client, page, wait);
+ else
+ ret = try_to_free_buffers(page);
+ read_unlock(&ei->client_lock);
+ return ret;
+}
+
+/*
+ * blkdev_register_client_releasepage: register client_releasepage.
+ */
+int blkdev_register_client_releasepage(struct block_device *bdev,
+ void *client, int (*releasepage)(void*, struct page*, gfp_t))
+{
+ struct bdev_inode *ei = BDEV_I(bdev->bd_inode);
+ int ret = 1;
+
+ write_lock(&ei->client_lock);
+ if (ei->client == NULL && ei->client_releasepage == NULL) {
+ ei->client = client;
+ ei->client_releasepage = releasepage;
+ } else if (ei->client != client
+ || ei->client_releasepage != releasepage)
+ ret = 0;
+ write_unlock(&ei->client_lock);
+ return ret;
+}
+
+/*
+ * blkdev_unregister_client_releasepage: unregister client_releasepage.
+ */
+void blkdev_unregister_client_releasepage(struct block_device *bdev)
+{
+ struct bdev_inode *ei = BDEV_I(bdev->bd_inode);
+
+ write_lock(&ei->client_lock);
+ ei->client = NULL;
+ ei->client_releasepage = NULL;
+ write_unlock(&ei->client_lock);
+}
+
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
@@ -1215,6 +1276,7 @@ static const struct address_space_operat
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
.writepages = generic_writepages,
+ .releasepage = blkdev_releasepage,
.direct_IO = blkdev_direct_IO,
};
diff -Nurp linux-2.6.28-rc4.orig/fs/super.c linux-2.6.28-rc4/fs/super.c
--- linux-2.6.28-rc4.orig/fs/super.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/super.c 2008-11-11 09:25:04.000000000 +0900
@@ -801,6 +801,18 @@ int get_sb_bdev(struct file_system_type
s->s_flags |= MS_ACTIVE;
}
+ /*
+ * register a client function which releases a page whose mapping is
+ * block device
+ */
+ if (fs_type->release_metadata != NULL
+ && !blkdev_register_client_releasepage(bdev, s,
+ fs_type->release_metadata)) {
+ up_write(&s->s_umount);
+ deactivate_super(s);
+ error = -EBUSY;
+ goto error_bdev;
+ }
return simple_set_mnt(mnt, s);
@@ -819,6 +831,16 @@ void kill_block_super(struct super_block
struct block_device *bdev = sb->s_bdev;
fmode_t mode = sb->s_mode;
+ /*
+ * unregister a client function which releases a page whose mapping is
+ * block device
+ *
+ * This is sure to be unmounting here, and it releases all own data
+ * itself. Therefore the filesystem's function which is owned by the
+ * block device, which releases its data is not needed any more.
+ */
+ if (sb->s_type->release_metadata != NULL)
+ blkdev_unregister_client_releasepage(bdev);
generic_shutdown_super(sb);
sync_blockdev(bdev);
close_bdev_exclusive(bdev, mode);
diff -Nurp linux-2.6.28-rc4.orig/include/linux/fs.h linux-2.6.28-rc4/include/linux/fs.h
--- linux-2.6.28-rc4.orig/include/linux/fs.h 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/include/linux/fs.h 2008-11-11 09:01:12.000000000 +0900
@@ -1538,6 +1538,7 @@ struct file_system_type {
int (*get_sb) (struct file_system_type *, int,
const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
+ int (*release_metadata)(void*, struct page*, gfp_t);
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
@@ -1699,8 +1700,16 @@ extern void bd_set_size(struct block_dev
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern int blkdev_register_client_releasepage(struct block_device *,
+ void *, int (*releasepage)(void *, struct page*, gfp_t));
+extern void blkdev_unregister_client_releasepage(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
+static inline int blkdev_register_client_releasepage(struct block_device *,
+ void *, int (*releasepage)(void *, struct page*, gfp_t))
+{ return 1; }
+static inline void blkdev_unregister_client_releasepage(struct block_device *)
+{}
#endif
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
[PATCH 2/3] ext3: release block-device-mapping buffer_heads which have the filesystem
private data for avoiding oom-killer
Implement a block-device client's releasepage() which releases its
private data (journal_heads) and then the page; the client here is an
ext3 filesystem. This function is called from the block-device mapping's
releasepage(), blkdev_releasepage().
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/ext3/inode.c | 16 ++++++++++++++++
fs/ext3/super.c | 2 ++
include/linux/ext3_fs.h | 2 ++
3 files changed, 20 insertions(+)
diff -Nurp linux-2.6.28-rc4.orig/fs/ext3/inode.c linux-2.6.28-rc4/fs/ext3/inode.c
--- linux-2.6.28-rc4.orig/fs/ext3/inode.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/ext3/inode.c 2008-11-11 09:31:13.000000000 +0900
@@ -1680,6 +1680,28 @@ static int ext3_releasepage(struct page
}
/*
+ * ext3_release_metadata: try to release own page.
+ * The mapping of this page is block device's. Therefore try_to_free_buffers()
+ * cannot release it. As a result, a function which releases own page is
+ * required.
+ */
+int ext3_release_metadata(void *client, struct page *page, gfp_t wait)
+{
+ struct super_block *sb = (struct super_block*)client;
+ journal_t *journal;
+
+ WARN_ON(PageChecked(page));
+ if (!page_has_buffers(page))
+ return 0;
+ BUG_ON(EXT3_SB(sb) == NULL);
+ journal = EXT3_SB(sb)->s_journal;
+ if (journal != NULL)
+ return journal_try_to_free_buffers(journal, page, wait);
+ else
+ return try_to_free_buffers(page);
+}
+
+/*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
diff -Nurp linux-2.6.28-rc4.orig/fs/ext3/super.c linux-2.6.28-rc4/fs/ext3/super.c
--- linux-2.6.28-rc4.orig/fs/ext3/super.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/ext3/super.c 2008-11-11 09:01:53.000000000 +0900
@@ -2972,6 +2972,8 @@ static struct file_system_type ext3_fs_t
.name = "ext3",
.get_sb = ext3_get_sb,
.kill_sb = kill_block_super,
+ .release_metadata
+ = ext3_release_metadata,
.fs_flags = FS_REQUIRES_DEV,
};
diff -Nurp linux-2.6.28-rc4.orig/include/linux/ext3_fs.h linux-2.6.28-rc4/include/linux/ext3_fs.h
--- linux-2.6.28-rc4.orig/include/linux/ext3_fs.h 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/include/linux/ext3_fs.h 2008-11-10 18:33:52.000000000 +0900
@@ -841,6 +841,8 @@ extern void ext3_get_inode_flags(struct
extern void ext3_set_aops(struct inode *inode);
extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
+extern int ext3_release_metadata(void *client, struct page *page,
+ gfp_t wait);
/* ioctl.c */
extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
[PATCH 3/3] ext4: release block-device-mapping buffer_heads which have the filesystem
private data for avoiding oom-killer
Implement a block-device client's releasepage() which releases its
private data (journal_heads) and then the page; the client here is an
ext4 filesystem. This function is called from the block-device mapping's
releasepage(), blkdev_releasepage().
Signed-off-by: Toshiyuki Okajima <[email protected]>
---
fs/ext4/ext4.h | 2 ++
fs/ext4/inode.c | 22 ++++++++++++++++++++++
fs/ext4/super.c | 4 ++++
3 files changed, 28 insertions(+)
diff -Nurp linux-2.6.28-rc4.orig/fs/ext4/ext4.h linux-2.6.28-rc4/fs/ext4/ext4.h
--- linux-2.6.28-rc4.orig/fs/ext4/ext4.h 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/ext4/ext4.h 2008-11-12 10:44:25.000000000 +0900
@@ -1095,6 +1095,8 @@ extern int ext4_chunk_trans_blocks(struc
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern int ext4_release_metadata(void *client, struct page *page,
+ gfp_t wait);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff -Nurp linux-2.6.28-rc4.orig/fs/ext4/inode.c linux-2.6.28-rc4/fs/ext4/inode.c
--- linux-2.6.28-rc4.orig/fs/ext4/inode.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/ext4/inode.c 2008-11-12 10:42:55.000000000 +0900
@@ -3002,6 +3002,28 @@ static int ext4_releasepage(struct page
}
/*
+ * ext4_release_metadata: try to release own page.
+ * The mapping of this page is block device's. Therefore try_to_free_buffers()
+ * cannot release it. As a result, a function which releases own page is
+ * required.
+ */
+int ext4_release_metadata(void *client, struct page *page, gfp_t wait)
+{
+ struct super_block *sb = (struct super_block*)client;
+ journal_t *journal;
+
+ WARN_ON(PageChecked(page));
+ if (!page_has_buffers(page))
+ return 0;
+ BUG_ON(EXT4_SB(sb) == NULL);
+ journal = EXT4_SB(sb)->s_journal;
+ if (journal != NULL)
+ return jbd2_journal_try_to_free_buffers(journal, page, wait);
+ else
+ return try_to_free_buffers(page);
+}
+
+/*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
diff -Nurp linux-2.6.28-rc4.orig/fs/ext4/super.c linux-2.6.28-rc4/fs/ext4/super.c
--- linux-2.6.28-rc4.orig/fs/ext4/super.c 2008-11-10 09:36:15.000000000 +0900
+++ linux-2.6.28-rc4/fs/ext4/super.c 2008-11-12 12:04:58.000000000 +0900
@@ -3543,6 +3543,8 @@ static struct file_system_type ext4_fs_t
.name = "ext4",
.get_sb = ext4_get_sb,
.kill_sb = kill_block_super,
+ .release_metadata
+ = ext4_release_metadata,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -3562,6 +3564,8 @@ static struct file_system_type ext4dev_f
.name = "ext4dev",
.get_sb = ext4dev_get_sb,
.kill_sb = kill_block_super,
+ .release_metadata
+ = ext4_release_metadata,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS("ext4dev");