2009-06-22 09:38:56

by Kamezawa Hiroyuki

Subject: [RFC][PATCH] cgroup: fix permanent wait in rmdir

previous discussion was this => http://marc.info/?t=124478543600001&r=1&w=2

I think this is a minimal fix (in code size and behavior), and because
we can't take a BIG LOCK, this kind of check is necessary anyway.
Any comments are welcome.
==
From: KAMEZAWA Hiroyuki <[email protected]>

cgroup now has logic to wait until a cgroup is ready for rmdir, to avoid
returning -EBUSY too frequently.
(See commit ec64f51545fffbc4cb968f0cea56341a4b07e85a,
"cgroup: fix frequent -EBUSY at rmdir".)

Nishimura-san reported a case where this waiting goes wrong, and this is
a fix to make it reliable. A thread waiting for rmdir cannot be woken up
when a refcnt taken by css_tryget() isn't put immediately.
(The original code assumed css_put() would be called soon.)

memcg has such a case, and this patch fixes the problem. It adds a
retry_rmdir() callback to cgroup_subsys, checks whether we can sleep
before actually sleeping, and exports the CGRP_WAIT_ON_RMDIR flag to
subsystems.

Note: another solution would be adding an "rmdir state" to each subsys,
but that would be much more complicated than this do-enough-checks
solution.
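
To illustrate, here is a simplified sketch of what the rmdir path does
with this patch applied (a sketch only; locking details and error paths
are trimmed):

==
 again:
	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);

	if (!cgroup_clear_css_refs(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		/*
		 * If a subsys reports that the remaining css refs are not
		 * temporary (retry_rmdir() returns non-zero), don't sleep;
		 * go back and run pre_destroy() again. Otherwise, sleep
		 * until css_put() drops the last ref and wakes us up.
		 */
		if (!cgroup_check_retry_rmdir(cgrp))
			schedule();
		finish_wait(&cgroup_rmdir_waitq, &wait);
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		if (signal_pending(current))
			return -EINTR;
		goto again;
	}
==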

Reported-by: Daisuke Nishimura <[email protected]>
Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
---
Documentation/cgroups/cgroups.txt | 11 +++++++++++
include/linux/cgroup.h | 9 +++++++++
kernel/cgroup.c | 25 +++++++++++++++++++++----
mm/memcontrol.c | 29 ++++++++++++++++++++++++++---
4 files changed, 67 insertions(+), 7 deletions(-)

Index: linux-2.6.30-git18/include/linux/cgroup.h
===================================================================
--- linux-2.6.30-git18.orig/include/linux/cgroup.h
+++ linux-2.6.30-git18/include/linux/cgroup.h
@@ -192,6 +192,14 @@ struct cgroup {
struct rcu_head rcu_head;
};

+void __cgroup_wakeup_rmdir_waiters(void);
+static inline void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+{
+ if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+ __cgroup_wakeup_rmdir_waiters();
+}
+
+
/*
* A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
@@ -374,6 +382,7 @@ struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+ int (*retry_rmdir)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss,
struct cgroup *cgrp, struct task_struct *tsk);
Index: linux-2.6.30-git18/kernel/cgroup.c
===================================================================
--- linux-2.6.30-git18.orig/kernel/cgroup.c
+++ linux-2.6.30-git18/kernel/cgroup.c
@@ -636,6 +636,23 @@ static int cgroup_call_pre_destroy(struc
}
return ret;
}
+/*
+ * Call each subsys's retry_rmdir() handler. If this returns non-zero, we
+ * retry rmdir immediately and call pre_destroy() again.
+ */
+static int cgroup_check_retry_rmdir(struct cgroup *cgrp)
+{
+ struct cgroup_subsys *ss;
+ int ret = 0;
+
+ for_each_subsys(cgrp->root, ss)
+ if (ss->retry_rmdir) {
+ ret = ss->retry_rmdir(ss, cgrp);
+ if (ret)
+ break;
+ }
+ return ret;
+}

static void free_cgroup_rcu(struct rcu_head *obj)
{
@@ -738,10 +755,9 @@ static void cgroup_d_remove_dir(struct d
*/
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

-static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+void __cgroup_wakeup_rmdir_waiters(void)
{
- if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
- wake_up_all(&cgroup_rmdir_waitq);
+ wake_up_all(&cgroup_rmdir_waitq);
}

static int rebind_subsystems(struct cgroupfs_root *root,
@@ -2722,7 +2738,8 @@ again:

if (!cgroup_clear_css_refs(cgrp)) {
mutex_unlock(&cgroup_mutex);
- schedule();
+ if (!cgroup_check_retry_rmdir(cgrp))
+ schedule();
finish_wait(&cgroup_rmdir_waitq, &wait);
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
if (signal_pending(current))
Index: linux-2.6.30-git18/mm/memcontrol.c
===================================================================
--- linux-2.6.30-git18.orig/mm/memcontrol.c
+++ linux-2.6.30-git18/mm/memcontrol.c
@@ -179,7 +179,6 @@ struct mem_cgroup {

/* set when res.limit == memsw.limit */
bool memsw_is_minimum;
-
/*
* statistics. This must be placed at the end of memcg.
*/
@@ -1428,6 +1427,9 @@ __mem_cgroup_commit_charge_swapin(struct
return;
if (!ptr)
return;
+ /* We access ptr->css.cgroup later. Keep one refcnt here. */
+ css_get(&ptr->css);
+
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page);
__mem_cgroup_commit_charge(ptr, pc, ctype);
@@ -1457,8 +1459,16 @@ __mem_cgroup_commit_charge_swapin(struct
}
rcu_read_unlock();
}
- /* add this page(page_cgroup) to the LRU we want. */
-
+ /*
+ * At swapin, "ptr" is taken from swap_cgroup, not from the task. So
+ * this ptr can be under rmdir(). In a race with rmdir(), we may
+ * charge against a cgroup for which a thread waits to restart rmdir().
+ * That thread is woken when the css refcnt goes to 0, but we charged...
+ * Because we can't do css_get()->charge atomically, at swapin we have
+ * to check that there is no waiter for rmdir.
+ */
+ cgroup_wakeup_rmdir_waiters(ptr->css.cgroup);
+ css_put(&ptr->css);
}

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
@@ -2556,6 +2566,7 @@ mem_cgroup_create(struct cgroup_subsys *

if (parent)
mem->swappiness = get_swappiness(parent);
+
atomic_set(&mem->refcnt, 1);
return &mem->css;
free_out:
@@ -2571,6 +2582,17 @@ static int mem_cgroup_pre_destroy(struct
return mem_cgroup_force_empty(mem, false);
}

+static int mem_cgroup_retry_rmdir(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+
+ if (res_counter_read_u64(&memcg->res, RES_USAGE))
+ return 1;
+ return 0;
+}
+
+
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
@@ -2610,6 +2632,7 @@ struct cgroup_subsys mem_cgroup_subsys =
.subsys_id = mem_cgroup_subsys_id,
.create = mem_cgroup_create,
.pre_destroy = mem_cgroup_pre_destroy,
+ .retry_rmdir = mem_cgroup_retry_rmdir,
.destroy = mem_cgroup_destroy,
.populate = mem_cgroup_populate,
.attach = mem_cgroup_move_task,
Index: linux-2.6.30-git18/Documentation/cgroups/cgroups.txt
===================================================================
--- linux-2.6.30-git18.orig/Documentation/cgroups/cgroups.txt
+++ linux-2.6.30-git18/Documentation/cgroups/cgroups.txt
@@ -500,6 +500,17 @@ there are not tasks in the cgroup. If pr
rmdir() will fail with it. From this behavior, pre_destroy() can be
called multiple times against a cgroup.

+int retry_rmdir(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+Called at rmdir right after the kernel finds there are remaining refcnts on
+subsystems after pre_destroy(). When retry_rmdir() returns 0, the caller goes
+to sleep and wakes up when the css's refcnt goes down to 0 by css_put().
+When this returns 1, the caller doesn't sleep and retries rmdir immediately.
+This is useful when the subsys knows the remaining css refcnt is not temporary
+and calling pre_destroy() again is the proper way to remove it
+(or the proper way to return -EBUSY).
+
+
int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *task)
(cgroup_mutex held by caller)


2009-06-22 10:52:52

by Ingo Molnar

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir


FYI, there's a new cgroup related list corruption warning/crash that
i've seen a lot of times in latest -tip tests:

[ 478.555544] ------------[ cut here ]------------
[ 478.556523] WARNING: at lib/list_debug.c:26 __list_add+0x70/0xa0()
[ 478.556523] Hardware name:
[ 478.556523] list_add corruption. next->prev should be prev (ffff88003e640448), but was ffff88003fa1a6e8. (next=ffff88003fa1a8a0).
[ 478.556523] Modules linked in:
[ 478.556523] Pid: 470, comm: kswapd0 Not tainted 2.6.30-tip #10989
[ 478.556523] Call Trace:
[ 478.556523] [<ffffffff81306150>] ? __list_add+0x70/0xa0
[ 478.556523] [<ffffffff810598dc>] warn_slowpath_common+0x8c/0xc0
[ 478.556523] [<ffffffff81059999>] warn_slowpath_fmt+0x69/0x70
[ 478.556523] [<ffffffff81086e3b>] ? __lock_acquired+0x18b/0x2b0
[ 478.556523] [<ffffffff811022f0>] ? page_check_address+0x110/0x1a0
[ 478.556523] [<ffffffff812ebcf2>] ? cpumask_any_but+0x42/0xb0
[ 478.556523] [<ffffffff8108c528>] ? __lock_release+0x38/0x90
[ 478.556523] [<ffffffff811024e1>] ? page_referenced_one+0x91/0x120
[ 478.556523] [<ffffffff81306150>] __list_add+0x70/0xa0
[ 478.556523] [<ffffffff8111dc63>] mem_cgroup_add_lru_list+0x63/0x70
[ 478.556523] [<ffffffff810eaee4>] move_active_pages_to_lru+0xf4/0x180
[ 478.556523] [<ffffffff810eb758>] ? shrink_active_list+0x1f8/0x2a0
[ 478.556523] [<ffffffff810eb758>] ? shrink_active_list+0x1f8/0x2a0
[ 478.556523] [<ffffffff810eb794>] shrink_active_list+0x234/0x2a0
[ 478.556523] [<ffffffff810ec3c3>] shrink_zone+0x173/0x1f0
[ 478.556523] [<ffffffff810ece0a>] balance_pgdat+0x4da/0x4e0
[ 478.556523] [<ffffffff810eb240>] ? isolate_pages_global+0x0/0x60
[ 478.556523] [<ffffffff810ed3b6>] kswapd+0x106/0x150
[ 478.556523] [<ffffffff810752f0>] ? autoremove_wake_function+0x0/0x40
[ 478.556523] [<ffffffff810ed2b0>] ? kswapd+0x0/0x150
[ 478.556523] [<ffffffff8107516e>] kthread+0x9e/0xb0
[ 478.556523] [<ffffffff8100d2ba>] child_rip+0xa/0x20
[ 478.556523] [<ffffffff8100cc40>] ? restore_args+0x0/0x30
[ 478.556523] [<ffffffff81075085>] ? kthreadd+0xb5/0x100
[ 478.556523] [<ffffffff810750d0>] ? kthread+0x0/0xb0
[ 478.556523] [<ffffffff8100d2b0>] ? child_rip+0x0/0x20
[ 478.556523] ---[ end trace 9f3122957c34141e ]---
[ 484.923530] ------------[ cut here ]------------
[ 484.924525] WARNING: at lib/list_debug.c:26 __list_add+0x70/0xa0()
[ 484.924525] Hardware name:
[ 484.924525] list_add corruption. next->prev should be prev (ffff88003e640448), but was ffff88003fa192e8. (next=ffff88003fa14d88).
[ 484.941152] Modules linked in:
[ 484.941152] Pid: 470, comm: kswapd0 Tainted: G W 2.6.30-tip #10989
[ 484.941152] Call Trace:
[ 484.941152] [<ffffffff81306150>] ? __list_add+0x70/0xa0
[ 484.941152] [<ffffffff810598dc>] warn_slowpath_common+0x8c/0xc0
[ 484.941152] [<ffffffff81059999>] warn_slowpath_fmt+0x69/0x70
[ 484.941152] [<ffffffff81086e3b>] ? __lock_acquired+0x18b/0x2b0
[ 484.941152] [<ffffffff811022f0>] ? page_check_address+0x110/0x1a0
[ 484.941152] [<ffffffff812ebcf2>] ? cpumask_any_but+0x42/0xb0
[ 484.941152] [<ffffffff8108c528>] ? __lock_release+0x38/0x90
[ 484.941152] [<ffffffff811024e1>] ? page_referenced_one+0x91/0x120
[ 484.941152] [<ffffffff81306150>] __list_add+0x70/0xa0
[ 484.941152] [<ffffffff8111dc63>] mem_cgroup_add_lru_list+0x63/0x70
[ 484.941152] [<ffffffff810eaee4>] move_active_pages_to_lru+0xf4/0x180
[ 484.941152] [<ffffffff810eb758>] ? shrink_active_list+0x1f8/0x2a0
[ 484.941152] [<ffffffff810eb758>] ? shrink_active_list+0x1f8/0x2a0
[ 484.941152] [<ffffffff810eb794>] shrink_active_list+0x234/0x2a0
[ 484.941152] [<ffffffff810ec3c3>] shrink_zone+0x173/0x1f0
[ 484.941152] [<ffffffff810ece0a>] balance_pgdat+0x4da/0x4e0
[ 484.941152] [<ffffffff810eb240>] ? isolate_pages_global+0x0/0x60
[ 484.941152] [<ffffffff810ed3b6>] kswapd+0x106/0x150
[ 484.941152] [<ffffffff810752f0>] ? autoremove_wake_function+0x0/0x40
[ 484.941152] [<ffffffff810ed2b0>] ? kswapd+0x0/0x150
[ 484.941152] [<ffffffff8107516e>] kthread+0x9e/0xb0
[ 484.941152] [<ffffffff8100d2ba>] child_rip+0xa/0x20
[ 484.941152] [<ffffffff8100cc40>] ? restore_args+0x0/0x30
[ 484.941152] [<ffffffff81075085>] ? kthreadd+0xb5/0x100
[ 484.941152] [<ffffffff810750d0>] ? kthread+0x0/0xb0
[ 484.941152] [<ffffffff8100d2b0>] ? child_rip+0x0/0x20
[ 484.941152] ---[ end trace 9f3122957c34141f ]---
[ 485.365631] ------------[ cut here ]------------
[ 485.368029] WARNING: at lib/list_debug.c:26 __list_add+0x70/0xa0()

has this been reported before? Is there a fix for it i missed?

Thanks,

Ingo

2009-06-22 11:27:31

by Kamezawa Hiroyuki

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

Ingo Molnar wrote:
>
> FYI, there's a new cgroup related list corruption warning/crash that
> i've seen a lot of times in latest -tip tests:
>
> <snip>
>
> has this been reported before? Is there a fix for it i missed?
>
Hmm, looks like a new one.

Maybe related to my latest post which modifies __isolate_lru_page()
memcg: fix lru rotation in isolate_pages

I'll dig tomorrow, sorry.

Thanks,
-Kame

2009-06-22 12:17:52

by Balbir Singh

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

* KAMEZAWA Hiroyuki <[email protected]> [2009-06-22 20:27:15]:

> Ingo Molnar wrote:
> >
> > FYI, there's a new cgroup related list corruption warning/crash that
> > i've seen a lot of times in latest -tip tests:
> >
> > <snip>
> >
> > has this been reported before? Is there a fix for it i missed?
> >
> Hmm, looks like a new one.
>
> Maybe related to my latest post which modifies __isolate_lru_page()
> memcg: fix lru rotation in isolate_pages
>
> I'll dig tomorrow, sorry.
>

Hi, Ingo

Thanks for the bug report, looks new to me as well. Could you share
how you see this? Is it just regular use of a machine, some workload
running? Looks like it is happening in the reclaim path,
interesting...

--
Balbir

2009-06-22 12:20:27

by Kamezawa Hiroyuki

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

KAMEZAWA Hiroyuki wrote:
> Ingo Molnar wrote:
>>
>> FYI, there's a new cgroup related list corruption warning/crash that
>> i've seen a lot of times in latest -tip tests:
>>
>> <snip>
>>
>> has this been reported before? Is there a fix for it i missed?
>>
> Hmm, looks like a new one.
>
> Maybe related to my latest post which modifies __isolate_lru_page()
> memcg: fix lru rotation in isolate_pages
>
> I'll dig tomorrow, sorry.
>
Ah, while testing 2.6.30-git18 (which includes the above patch), I don't see
the above stack dump (with LIST_DEBUG=y) under a quick memory pressure test...

Thanks,
-Kame



2009-06-22 12:26:30

by Ingo Molnar

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir


* KAMEZAWA Hiroyuki <[email protected]> wrote:

> KAMEZAWA Hiroyuki wrote:
> > Ingo Molnar wrote:
> >>
> >> FYI, there's a new cgroup related list corruption warning/crash that
> >> i've seen a lot of times in latest -tip tests:
> >>
> >> <snip>
> >>
> >> has this been reported before? Is there a fix for it i missed?
> >>
> > Hmm, looks like a new one.
> >
> > Maybe related to my latest post which modifies __isolate_lru_page()
> > memcg: fix lru rotation in isolate_pages
> >
> > I'll dig tomorrow, sorry.
> >
> Ah, while testing 2.6.30-git18 (which includes the above patch), I don't see
> the above stack dump (with LIST_DEBUG=y) under a quick memory pressure
> test...

Note, it still occurs even with latest -git (f234012).

Ingo

2009-06-22 12:32:35

by Kamezawa Hiroyuki

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

Ingo Molnar wrote:
>> Ah, while testing 2.6.30-git18 (which includes the above patch), I don't see
>> the above stack dump (with LIST_DEBUG=y) under a quick memory pressure
>> test...
>
> Note, it still occurs even with latest -git (f234012).
>
Could you try this? (Sorry, I can't send a proper patch right now.)
== vmscan.c
 865 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 866                 struct list_head *src, struct list_head *dst,
 867                 unsigned long *scanned, int order, int mode, int file)
 868 {
 869         unsigned long nr_taken = 0;
 870         unsigned long scan;
 871
<snip>
 930                         /* Check that we have not crossed a zone boundary. */
 931                         if (unlikely(page_zone_id(cursor_page) != zone_id))
 932                                 continue;
 933                         if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 934                                 list_move(&cursor_page->lru, dst);
 935                                 mem_cgroup_del_lru(page);
 936                                 nr_taken++;
 937                                 scan++;
 938                         }
==

and change line 935 from

	mem_cgroup_del_lru(page);

to

	mem_cgroup_del_lru(cursor_page);

("page" here is the pivot page of the order-based scan; the page actually
moved to dst is cursor_page, so the memcg LRU accounting must be done on
cursor_page, or the moved page is left on the memcg LRU list with stale
links.)


Thanks,
-Kame



2009-06-22 23:59:51

by Kamezawa Hiroyuki

Subject: [BUGFIX][PATCH] fix bad page removal from LRU (Was Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Mon, 22 Jun 2009 12:52:31 +0200
Ingo Molnar <[email protected]> wrote:

>
> FYI, there's a new cgroup related list corruption warning/crash that
> i've seen a lot of times in latest -tip tests:
>
> <snip>
>
> has this been reported before? Is there a fix for it i missed?
>

I think this is a fix for the problem. Sorry for the regression.
This fixes the "memcg: fix lru rotation in isolate_pages" patch in 2.6.30-git18.

==
From: KAMEZAWA Hiroyuki <[email protected]>

The page isolated is "cursor_page", not "page".
This eventually causes list corruption.

Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

Index: linux-2.6.30-git18/mm/vmscan.c
===================================================================
--- linux-2.6.30-git18.orig/mm/vmscan.c
+++ linux-2.6.30-git18/mm/vmscan.c
@@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(u
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
- mem_cgroup_del_lru(page);
+ mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}



2009-06-23 00:24:11

by Kamezawa Hiroyuki

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Mon, 22 Jun 2009 18:37:07 +0900
KAMEZAWA Hiroyuki <[email protected]> wrote:

> previous discussion was this => http://marc.info/?t=124478543600001&r=1&w=2
>
> I think this is a minimal fix (in code size and behavior), and because
> we can't take a BIG LOCK, this kind of check is necessary anyway.
> Any comments are welcome.

I'll split this into 2 patches...and I found I should check page-migration, too.
So just modifying the swap accounting logic doesn't help, after all.

Thanks,
-Kame

> <snip>

2009-06-23 00:48:09

by Balbir Singh

Subject: Re: [BUGFIX][PATCH] fix bad page removal from LRU (Was Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

* KAMEZAWA Hiroyuki <[email protected]> [2009-06-23 08:57:55]:

> I think this is a fix for the problem. Sorry for the regression.
> This fixes the "memcg: fix lru rotation in isolate_pages" patch in 2.6.30-git18.
>
> ==
> From: KAMEZAWA Hiroyuki <[email protected]>
>
> The page isolated is "cursor_page", not "page".
> This eventually causes list corruption.
>
> Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
> ---
> mm/vmscan.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> Index: linux-2.6.30-git18/mm/vmscan.c
> ===================================================================
> --- linux-2.6.30-git18.orig/mm/vmscan.c
> +++ linux-2.6.30-git18/mm/vmscan.c
> @@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(u
> continue;
> if (__isolate_lru_page(cursor_page, mode, file) == 0) {
> list_move(&cursor_page->lru, dst);
> - mem_cgroup_del_lru(page);
> + mem_cgroup_del_lru(cursor_page);
> nr_taken++;
> scan++;
> }

Good catch!

Reviewed-by: Balbir Singh <[email protected]>

--
Balbir

2009-06-23 04:15:31

by Daisuke Nishimura

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Tue, 23 Jun 2009 09:22:23 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> On Mon, 22 Jun 2009 18:37:07 +0900
> KAMEZAWA Hiroyuki <[email protected]> wrote:
>
> > previous discussion was this => http://marc.info/?t=124478543600001&r=1&w=2
> >
> > I think this is a minimal fix (in code size and behavior), and because
> > we can't take a BIG LOCK, this kind of check is necessary anyway.
> > Any comments are welcome.
>
> I'll split this into 2 patches...and I found I should check page-migration, too.
I'll wait for the new version, but can you explain this page-migration case in advance?

> > +static int mem_cgroup_retry_rmdir(struct cgroup_subsys *ss,
> > + struct cgroup *cont)
> > +{
> > + struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
> > +
> > + if (res_counter_read_u64(&memcg->res, RES_USAGE))
It should be &mem->res.

> > + return 1;
> > + return 0;
> > +}
> > +
> > +


Thanks,
Daisuke Nishimura.

> So just modifying the swap accounting logic doesn't help, after all.
>
> Thanks,
> -Kame
>

2009-06-23 04:46:08

by Kamezawa Hiroyuki

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Tue, 23 Jun 2009 13:13:33 +0900
Daisuke Nishimura <[email protected]> wrote:

> On Tue, 23 Jun 2009 09:22:23 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> > On Mon, 22 Jun 2009 18:37:07 +0900
> > KAMEZAWA Hiroyuki <[email protected]> wrote:
> >
> > > previous discussion was this => http://marc.info/?t=124478543600001&r=1&w=2
> > >
> > > I think this is a minimal fix (in code size and behavior), and because
> > > we can't take a BIG LOCK, this kind of check is necessary anyway.
> > > Any comments are welcome.
> >
> > I'll split this into 2 patches...and I found I should check page-migration, too.
> I'll wait for the new version, but can you explain this page-migration case in advance?
>

It's not far from the swap-in case.

Assume a cgroup "A" which includes file caches. A task in another group
mmaps those file caches and does page migration, while rmdir against "A"
is called at the same time.

In mem_cgroup_prepare_migration(), the following check is used.

==
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
                mem = pc->mem_cgroup;
                css_get(&mem->css);
        }
        unlock_page_cgroup(pc);
                        <======================================(*)
        if (mem) {
                        <==============================(**)
                try_charge();
                ...
        }
==

At (*), we grab a css refcnt on a memcg which may already be under
pre_destroy(), and at (**), pre_destroy() may have returned 0, but the
charge can still happen after pre_destroy() has finished.

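The fix will probably mirror the swap-in hunk in the RFC patch -- roughly
like this (just a sketch of the direction, not the actual follow-up patch;
the exact placement is an assumption):

==
	/* after the charge against "mem" is committed at (**) ... */
	/*
	 * "mem" was taken from pc->mem_cgroup, not from the current task,
	 * so a thread may already be sleeping in rmdir() on this cgroup.
	 * Wake it up so it can run pre_destroy() again and see the new
	 * charge, then drop the ref taken at (*).
	 */
	cgroup_wakeup_rmdir_waiters(mem->css.cgroup);
	css_put(&mem->css);
==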

> > > +static int mem_cgroup_retry_rmdir(struct cgroup_subsys *ss,
> > > + struct cgroup *cont)
> > > +{
> > > + struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
> > > +
> > > + if (res_counter_read_u64(&memcg->res, RES_USAGE))
> It should be &mem->res.
>
Yes.
Too many typos in my patches these days...

Thanks,
-Kame

2009-06-23 04:56:18

by Daisuke Nishimura

Subject: Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Tue, 23 Jun 2009 13:44:20 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> On Tue, 23 Jun 2009 13:13:33 +0900
> Daisuke Nishimura <[email protected]> wrote:
>
> > On Tue, 23 Jun 2009 09:22:23 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> > > On Mon, 22 Jun 2009 18:37:07 +0900
> > > KAMEZAWA Hiroyuki <[email protected]> wrote:
> > >
> > > > previous discussion was this => http://marc.info/?t=124478543600001&r=1&w=2
> > > >
> > > > I think this is a minimal fix (in code size and behavior), and because
> > > > we can't take a BIG LOCK, this kind of check is necessary anyway.
> > > > Any comments are welcome.
> > >
> > > I'll split this into 2 patches...and I found I should check page-migration, too.
> > I'll wait for the new version, but can you explain this page-migration case in advance?
> >
>
> It's not far from the swap-in case.
>
> Assume a cgroup "A" which includes file caches. A task in another group
> mmaps those file caches and does page migration, while rmdir against "A"
> is called at the same time.
>
> In mem_cgroup_prepare_migration(), the following check is used.
>
> ==
>         lock_page_cgroup(pc);
>         if (PageCgroupUsed(pc)) {
>                 mem = pc->mem_cgroup;
>                 css_get(&mem->css);
>         }
>         unlock_page_cgroup(pc);
>                         <======================================(*)
>         if (mem) {
>                         <==============================(**)
>                 try_charge();
>                 ...
>         }
> ==
>
> At (*), we grab a css refcnt on a memcg which may already be under
> pre_destroy(), and at (**), pre_destroy() may have returned 0, but the
> charge can still happen after pre_destroy() has finished.
>
Ah I see, you're right.

Thank you for your clarification.

Daisuke Nishimura.

>
> > > > +static int mem_cgroup_retry_rmdir(struct cgroup_subsys *ss,
> > > > + struct cgroup *cont)
> > > > +{
> > > > + struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
> > > > +
> > > > + if (res_counter_read_u64(&memcg->res, RES_USAGE))
> > It should be &mem->res.
> >
> Yes.
> Too many typos in my patches these days...
>
> Thanks,
> -Kame
>

2009-06-23 07:30:36

by Daisuke Nishimura

Subject: Re: [BUGFIX][PATCH] fix bad page removal from LRU (Was Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir

On Tue, 23 Jun 2009 08:57:55 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> On Mon, 22 Jun 2009 12:52:31 +0200
> Ingo Molnar <[email protected]> wrote:
>
> >
> > FYI, there's a new cgroup related list corruption warning/crash that
> > i've seen a lot of times in latest -tip tests:
> >
> > <snip>
> >
> > Has this been reported before? Is there a fix for it I missed?
> >
>
> I think this fixes the problem. Sorry for the regression.
> This is a fix for the "memcg: fix lru rotation in isolate_pages" patch in 2.6.30-git18.
>
> ==
> From: KAMEZAWA Hiroyuki <[email protected]>
>
> The page isolated is "cursor_page", not "page".
> This eventually causes list corruption.
>
> Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>

I reproduced this bug and have confirmed that this patch fixes it.

Tested-by: Daisuke Nishimura <[email protected]>

> ---
> mm/vmscan.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> Index: linux-2.6.30-git18/mm/vmscan.c
> ===================================================================
> --- linux-2.6.30-git18.orig/mm/vmscan.c
> +++ linux-2.6.30-git18/mm/vmscan.c
> @@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(u
> continue;
> if (__isolate_lru_page(cursor_page, mode, file) == 0) {
> list_move(&cursor_page->lru, dst);
> - mem_cgroup_del_lru(page);
> + mem_cgroup_del_lru(cursor_page);
> nr_taken++;
> scan++;
> }
>
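
(For context, a simplified sketch of the surrounding lumpy-reclaim loop in
isolate_lru_pages(); the 2.6.30-era details are abbreviated and partly
assumed, so treat this as an illustration rather than the exact source.)

/* inside isolate_lru_pages(): scan the pages around the tag page */
for (pfn = low; pfn < high; pfn++) {
	struct page *cursor_page;

	if (!pfn_valid_within(pfn))
		continue;
	cursor_page = pfn_to_page(pfn);
	/* ... zone and usability checks on cursor_page ... */
	if (__isolate_lru_page(cursor_page, mode, file) == 0) {
		list_move(&cursor_page->lru, dst);
		/*
		 * The memcg LRU must be updated for the page actually
		 * isolated (cursor_page); passing "page" here removed a
		 * different page from the memcg list, corrupting it.
		 */
		mem_cgroup_del_lru(cursor_page);
		nr_taken++;
		scan++;
	}
}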

2009-06-23 07:31:38

by Kamezawa Hiroyuki

[permalink] [raw]
Subject: Re: [BUGFIX][PATCH] fix bad page removal from LRU (Was Re: [RFC][PATCH] cgroup: fix permanent wait in rmdir)

On Tue, 23 Jun 2009 16:27:59 +0900
Daisuke Nishimura <[email protected]> wrote:

> On Tue, 23 Jun 2009 08:57:55 +0900, KAMEZAWA Hiroyuki <[email protected]> wrote:
> > On Mon, 22 Jun 2009 12:52:31 +0200
> > Ingo Molnar <[email protected]> wrote:
> >
> > > [ Ingo's report and stack traces snipped; quoted in full earlier in the thread ]
> >
> > I think this fixes the problem. Sorry for the regression.
> > This is a fix for the "memcg: fix lru rotation in isolate_pages" patch in 2.6.30-git18.
> >
> > ==
> > From: KAMEZAWA Hiroyuki <[email protected]>
> >
> > The page isolated is "cursor_page", not "page".
> > This eventually causes list corruption.
> >
> > Signed-off-by: KAMEZAWA Hiroyuki <[email protected]>
>
> I reproduced this bug and have confirmed that this patch fixes it.
>
> Tested-by: Daisuke Nishimura <[email protected]>
>
Thank you!!

-Kame