2014-04-24 21:02:23

by Tejun Heo

Subject: [PATCHSET cgroup/for-3.16] cgroup: implement css->id

Hello,

Until now, cgroup->id has been used to identify all the associated
csses, and css_from_id() takes a cgroup ID and returns the matching css
by looking up the cgroup and then dereferencing the css associated
with it; however, now that the lifetimes of cgroup and css are
separate, this is incorrect and breaks on the unified hierarchy when a
controller is disabled and enabled back again before the previous
instance is released.

This series adds css->id, which is a subsystem-unique ID, and converts
css_from_id() to look up by the new css->id instead. memcg is the
only user of css_from_id() and is also converted to use css->id.
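
Condensed from patch 6, the shape of the lookup changes roughly like
this (a sketch only; see the actual diff for full context):

	/* before: resolve the cgroup by its cgroup ID, then take its css */
	cgrp = idr_find(&ss->root->cgroup_idr, id);
	return cgrp ? cgroup_css(cgrp, ss) : NULL;

	/* after: resolve the css directly in the per-subsystem idr */
	return idr_find(&ss->css_idr, id);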

0001-cgroup-make-flags-and-subsys_masks-unsigned-int.patch
0002-cgroup-memcg-allocate-cgroup-ID-from-1.patch
0003-cgroup-protect-cgroup_root-cgroup_idr-with-a-spinloc.patch
0004-cgroup-use-RCU-free-in-create_css-failure-path.patch
0005-cgroup-update-init_css-into-init_and_link_css.patch
0006-cgroup-memcg-implement-css-id-and-convert-css_from_i.patch

0001-0003 are related prep/cleanups.

0004-0006 add css->id and convert css_from_id() and its user to it.

This patchset is on top of

cgroup/for-3.16 f8f22e53a262 ("cgroup: implement dynamic subtree controller enable/disable on the default hierarchy")
+ [1] [PATCHSET cgroup/for-3.16] cgroup: implement cgroup.populated
+ [2] Misc comment / warning cleanups

and available in the following git branch.

git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git review-css_id

diffstat follows.

include/linux/cgroup.h | 21 ++++--
kernel/cgroup.c | 164 ++++++++++++++++++++++++++++++++-----------------
mm/memcontrol.c | 10 --
3 files changed, 126 insertions(+), 69 deletions(-)

Thanks.

--
tejun

[1] http://lkml.kernel.org/g/[email protected]
[2] http://lkml.kernel.org/g/[email protected]
http://lkml.kernel.org/g/[email protected]
http://lkml.kernel.org/g/[email protected]
http://lkml.kernel.org/g/1397757955.3104.11.camel@joe-AO725


2014-04-24 21:02:29

by Tejun Heo

Subject: [PATCH 1/6] cgroup: make flags and subsys_masks unsigned int

There's no reason to use atomic bitops for cgroup_subsys_state->flags,
cgroup_root->flags and various subsys_masks. This patch updates those
to use bitwise and/or operations instead and converts them from
unsigned long to unsigned int.

This makes the fields occupy (marginally) smaller space and makes it
clear that they don't require atomicity.

This patch doesn't cause any behavior difference.
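
As a userspace-only illustration (not kernel code; the names below are
made up for the example), the conversion amounts to replacing the bitop
helpers with plain mask arithmetic on an unsigned int:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int subsys_mask = 0;
		int i = 3;

		subsys_mask |= (1 << i);		/* was set_bit(i, &subsys_mask) */
		assert(subsys_mask & (1 << i));		/* was test_bit(i, &subsys_mask) */
		printf("mask=0x%x\n", subsys_mask);	/* 0x%x instead of 0x%lx */
		return 0;
	}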

Signed-off-by: Tejun Heo <[email protected]>
---
include/linux/cgroup.h | 8 ++++----
kernel/cgroup.c | 37 ++++++++++++++++++-------------------
2 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4b38e2d..c6c703f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -62,7 +62,7 @@ struct cgroup_subsys_state {
/* the parent css */
struct cgroup_subsys_state *parent;

- unsigned long flags;
+ unsigned int flags;

/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
@@ -185,7 +185,7 @@ struct cgroup {
u64 serial_nr;

/* the bitmask of subsystems enabled on the child cgroups */
- unsigned long child_subsys_mask;
+ unsigned int child_subsys_mask;

/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
@@ -312,7 +312,7 @@ struct cgroup_root {
struct kernfs_root *kf_root;

/* The bitmask of subsystems attached to this hierarchy */
- unsigned long subsys_mask;
+ unsigned int subsys_mask;

/* Unique id for this hierarchy. */
int hierarchy_id;
@@ -327,7 +327,7 @@ struct cgroup_root {
struct list_head root_list;

/* Hierarchy-specific flags */
- unsigned long flags;
+ unsigned int flags;

/* IDs for cgroups in this hierarchy */
struct idr cgroup_idr;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3873267..21667f3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -181,7 +181,7 @@ static struct cftype cgroup_base_files[];

static void cgroup_put(struct cgroup *cgrp);
static int rebind_subsystems(struct cgroup_root *dst_root,
- unsigned long ss_mask);
+ unsigned int ss_mask);
static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss);
@@ -963,7 +963,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* update of a tasks cgroup pointer by cgroup_attach_task()
*/

-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

@@ -1079,7 +1079,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
* @cgrp: target cgroup
* @subsys_mask: mask of the subsystem ids whose files should be removed
*/
-static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
+static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
struct cgroup_subsys *ss;
int i;
@@ -1087,15 +1087,14 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
for_each_subsys(ss, i) {
struct cftype *cfts;

- if (!test_bit(i, &subsys_mask))
+ if (!(subsys_mask & (1 << i)))
continue;
list_for_each_entry(cfts, &ss->cfts, node)
cgroup_addrm_files(cgrp, cfts, false);
}
}

-static int rebind_subsystems(struct cgroup_root *dst_root,
- unsigned long ss_mask)
+static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
{
struct cgroup_subsys *ss;
int ssid, i, ret;
@@ -1128,7 +1127,7 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
* Just warn about it and continue.
*/
if (cgrp_dfl_root_visible) {
- pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
+ pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
ret, ss_mask);
pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
}
@@ -1214,8 +1213,8 @@ static int cgroup_show_options(struct seq_file *seq,
}

struct cgroup_sb_opts {
- unsigned long subsys_mask;
- unsigned long flags;
+ unsigned int subsys_mask;
+ unsigned int flags;
char *release_agent;
bool cpuset_clone_children;
char *name;
@@ -1227,12 +1226,12 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
char *token, *o = data;
bool all_ss = false, one_ss = false;
- unsigned long mask = (unsigned long)-1;
+ unsigned int mask = -1U;
struct cgroup_subsys *ss;
int i;

#ifdef CONFIG_CPUSETS
- mask = ~(1UL << cpuset_cgrp_id);
+ mask = ~(1U << cpuset_cgrp_id);
#endif

memset(opts, 0, sizeof(*opts));
@@ -1313,7 +1312,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
/* Mutually exclusive option 'all' + subsystem name */
if (all_ss)
return -EINVAL;
- set_bit(i, &opts->subsys_mask);
+ opts->subsys_mask |= (1 << i);
one_ss = true;

break;
@@ -1342,7 +1341,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
if (all_ss || (!one_ss && !opts->none && !opts->name))
for_each_subsys(ss, i)
if (!ss->disabled)
- set_bit(i, &opts->subsys_mask);
+ opts->subsys_mask |= (1 << i);

/*
* We either have to specify by name or by subsystems. (So
@@ -1373,7 +1372,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
int ret = 0;
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
struct cgroup_sb_opts opts;
- unsigned long added_mask, removed_mask;
+ unsigned int added_mask, removed_mask;

if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
pr_err("sane_behavior: remount is not allowed\n");
@@ -1398,7 +1397,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
/* Don't allow flags or name to change at remount */
if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
(opts.name && strcmp(opts.name, root->name))) {
- pr_err("option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n",
+ pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
root->flags & CGRP_ROOT_OPTION_MASK, root->name);
ret = -EINVAL;
@@ -1522,7 +1521,7 @@ static void init_cgroup_root(struct cgroup_root *root,
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

-static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
+static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
@@ -2507,7 +2506,7 @@ out_finish:
static int cgroup_subtree_control_write(struct cgroup_subsys_state *dummy_css,
struct cftype *cft, char *buffer)
{
- unsigned long enable_req = 0, disable_req = 0, enable, disable;
+ unsigned int enable_req = 0, disable_req = 0, enable, disable;
struct cgroup *cgrp = dummy_css->cgroup, *child;
struct cgroup_subsys *ss;
char *tok, *p;
@@ -3998,7 +3997,7 @@ static struct cftype cgroup_base_files[] = {
*
* On failure, no file is added.
*/
-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
struct cgroup_subsys *ss;
int i, ret = 0;
@@ -4007,7 +4006,7 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
for_each_subsys(ss, i) {
struct cftype *cfts;

- if (!test_bit(i, &subsys_mask))
+ if (!(subsys_mask & (1 << i)))
continue;

list_for_each_entry(cfts, &ss->cfts, node) {
--
1.9.0

2014-04-24 21:02:33

by Tejun Heo

Subject: [PATCH 5/6] cgroup: update init_css() into init_and_link_css()

init_css() takes the cgroup the new css belongs to as an argument and
initializes the new css's ->cgroup and ->parent pointers but doesn't
acquire the matching reference counts. After the previous patch,
create_css() puts init_css() and reference acquisition right next to
each other. Let's move reference acquisition into init_css() and
rename the function to init_and_link_css(). This makes sense and is
easier to follow. This makes the root csses hold a reference on
cgrp_dfl_root.cgrp, which is harmless.
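
Condensed from the diff below, the caller side in create_css() shrinks
from three calls to one:

	/* before */
	init_css(css, ss, cgrp);
	cgroup_get(cgrp);
	css_get(css->parent);

	/* after */
	init_and_link_css(css, ss, cgrp);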

Signed-off-by: Tejun Heo <[email protected]>
---
kernel/cgroup.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0e2c401..f1c98c5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4109,17 +4109,21 @@ static void css_release(struct percpu_ref *ref)
call_rcu(&css->rcu_head, css_free_rcu_fn);
}

-static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void init_and_link_css(struct cgroup_subsys_state *css,
+ struct cgroup_subsys *ss, struct cgroup *cgrp)
{
+ cgroup_get(cgrp);
+
css->cgroup = cgrp;
css->ss = ss;
css->flags = 0;

- if (cgrp->parent)
+ if (cgrp->parent) {
css->parent = cgroup_css(cgrp->parent, ss);
- else
+ css_get(css->parent);
+ } else {
css->flags |= CSS_ROOT;
+ }

BUG_ON(cgroup_css(cgrp, ss));
}
@@ -4185,9 +4189,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
if (IS_ERR(css))
return PTR_ERR(css);

- init_css(css, ss, cgrp);
- cgroup_get(cgrp);
- css_get(css->parent);
+ init_and_link_css(css, ss, cgrp);

err = percpu_ref_init(&css->refcnt, css_release);
if (err)
@@ -4656,7 +4658,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
- init_css(css, ss, &cgrp_dfl_root.cgrp);
+ init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

/* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
--
1.9.0

2014-04-24 21:02:49

by Tejun Heo

Subject: [PATCH 6/6] cgroup, memcg: implement css->id and convert css_from_id() to use it

Until now, cgroup->id has been used to identify all the associated
csses, and css_from_id() takes a cgroup ID and returns the matching css
by looking up the cgroup and then dereferencing the css associated
with it; however, now that the lifetimes of cgroup and css are
separate, this is incorrect and breaks on the unified hierarchy when a
controller is disabled and enabled back again before the previous
instance is released.

This patch adds css->id, which is a subsystem-unique ID, and converts
css_from_id() to look up by the new css->id instead. memcg is the
only user of css_from_id() and is also converted to use css->id.

For traditional hierarchies, this shouldn't make any functional
difference.
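
The lifetime of a css ID, condensed from the hunks below (the root css
simply gets ID 1 during init):

	/* create_css(): reserve an ID, but keep lookups failing for now */
	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	/* once the files exist, make the css visible to css_from_id() */
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	/* css_release(): drop the ID when the css goes away */
	cgroup_idr_remove(&ss->css_idr, css->id);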

Signed-off-by: Tejun Heo <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Jianyu Zhan <[email protected]>
---
include/linux/cgroup.h | 9 ++++++++
kernel/cgroup.c | 59 ++++++++++++++++++++++++++++++++------------------
mm/memcontrol.c | 4 ++--
3 files changed, 49 insertions(+), 23 deletions(-)

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 793f70a..2dfabb3 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -62,6 +62,12 @@ struct cgroup_subsys_state {
/* the parent css */
struct cgroup_subsys_state *parent;

+ /*
+ * Subsys-unique ID. 0 is unused and root is always 1. The
+ * matching css can be looked up using css_from_id().
+ */
+ int id;
+
unsigned int flags;

/* percpu_ref killing and RCU release */
@@ -655,6 +661,9 @@ struct cgroup_subsys {
/* link to parent, protected by cgroup_lock() */
struct cgroup_root *root;

+ /* idr for css->id */
+ struct idr css_idr;
+
/*
* List of cftypes. Each entry is the first entry of an array
* terminated by zero length name.
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f1c98c5..a1a20e8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -100,8 +100,8 @@ static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
- * Protects cgroup_idr so that IDs can be released without grabbing
- * cgroup_mutex.
+ * Protects cgroup_idr and css_idr so that IDs can be released without
+ * grabbing cgroup_mutex.
*/
static DEFINE_SPINLOCK(cgroup_idr_lock);

@@ -1089,12 +1089,6 @@ static void cgroup_put(struct cgroup *cgrp)
if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
return;

- /*
- * XXX: cgrp->id is only used to look up css's. As cgroup and
- * css's lifetimes will be decoupled, it should be made
- * per-subsystem and moved to css->id so that lookups are
- * successful until the target css is released.
- */
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
cgrp->id = -1;

@@ -4104,8 +4098,11 @@ static void css_release(struct percpu_ref *ref)
{
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
+ struct cgroup_subsys *ss = css->ss;
+
+ RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
+ cgroup_idr_remove(&ss->css_idr, css->id);

- RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL);
call_rcu(&css->rcu_head, css_free_rcu_fn);
}

@@ -4195,9 +4192,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
if (err)
goto err_free_css;

+ err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
+ if (err < 0)
+ goto err_free_percpu_ref;
+ css->id = err;
+
err = cgroup_populate_dir(cgrp, 1 << ss->id);
if (err)
- goto err_free_percpu_ref;
+ goto err_free_id;
+
+ /* @css is ready to be brought online now, make it visible */
+ cgroup_idr_replace(&ss->css_idr, css, css->id);

err = online_css(css);
if (err)
@@ -4216,6 +4221,8 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)

err_clear_dir:
cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+err_free_id:
+ cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
percpu_ref_cancel_init(&css->refcnt);
err_free_css:
@@ -4642,7 +4649,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
.rename = cgroup_rename,
};

-static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
+static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
struct cgroup_subsys_state *css;

@@ -4651,6 +4658,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);

+ idr_init(&ss->css_idr);
INIT_LIST_HEAD(&ss->cfts);

/* Create the root cgroup state for this subsystem */
@@ -4659,6 +4667,13 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
+ if (early) {
+ /* idr_alloc() can't be called safely during early init */
+ css->id = 1;
+ } else {
+ css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
+ BUG_ON(css->id < 0);
+ }

/* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
@@ -4709,7 +4724,7 @@ int __init cgroup_init_early(void)
ss->name = cgroup_subsys_name[i];

if (ss->early_init)
- cgroup_init_subsys(ss);
+ cgroup_init_subsys(ss, true);
}
return 0;
}
@@ -4741,8 +4756,16 @@ int __init cgroup_init(void)
mutex_unlock(&cgroup_tree_mutex);

for_each_subsys(ss, ssid) {
- if (!ss->early_init)
- cgroup_init_subsys(ss);
+ if (ss->early_init) {
+ struct cgroup_subsys_state *css =
+ init_css_set.subsys[ss->id];
+
+ css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
+ GFP_KERNEL);
+ BUG_ON(css->id < 0);
+ } else {
+ cgroup_init_subsys(ss, false);
+ }

list_add_tail(&init_css_set.e_cset_node[ssid],
&cgrp_dfl_root.cgrp.e_csets[ssid]);
@@ -5196,14 +5219,8 @@ struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
*/
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
- struct cgroup *cgrp;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
- cgrp = idr_find(&ss->root->cgroup_idr, id);
- if (cgrp)
- return cgroup_css(cgrp, ss);
- return NULL;
+ return idr_find(&ss->css_idr, id);
}

#ifdef CONFIG_CGROUP_DEBUG
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1d0b297..c3f82f6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -527,7 +527,7 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- return memcg->css.cgroup->id;
+ return memcg->css.id;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
@@ -6401,7 +6401,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));

- if (css->cgroup->id > MEM_CGROUP_ID_MAX)
+ if (css->id > MEM_CGROUP_ID_MAX)
return -ENOSPC;

if (!parent)
--
1.9.0

2014-04-24 21:03:31

by Tejun Heo

Subject: [PATCH 4/6] cgroup: use RCU free in create_css() failure path

Currently, when create_css() fails in the middle, the half-initialized
css is freed by invoking cgroup_subsys->css_free() directly. This
patch updates the function so that it invokes RCU free path instead.
As the RCU free path puts the parent css and owning cgroup, their
references are now acquired right after a new css is successfully
allocated.

This doesn't make any visible difference now but is to enable
implementing css->id and RCU-protected lookup by such IDs.
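
Condensed, the error path switches from a synchronous free to the same
RCU path the normal release uses:

	/* before */
	err_free_css:
		ss->css_free(css);

	/* after: the RCU callback also puts css->parent and the owning cgroup */
	err_free_css:
		call_rcu(&css->rcu_head, css_free_rcu_fn);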

Signed-off-by: Tejun Heo <[email protected]>
---
kernel/cgroup.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7cb9c08..0e2c401 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4185,12 +4185,14 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
if (IS_ERR(css))
return PTR_ERR(css);

+ init_css(css, ss, cgrp);
+ cgroup_get(cgrp);
+ css_get(css->parent);
+
err = percpu_ref_init(&css->refcnt, css_release);
if (err)
goto err_free_css;

- init_css(css, ss, cgrp);
-
err = cgroup_populate_dir(cgrp, 1 << ss->id);
if (err)
goto err_free_percpu_ref;
@@ -4199,9 +4201,6 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
if (err)
goto err_clear_dir;

- cgroup_get(cgrp);
- css_get(css->parent);
-
if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
parent->parent) {
pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4218,7 +4217,7 @@ err_clear_dir:
err_free_percpu_ref:
percpu_ref_cancel_init(&css->refcnt);
err_free_css:
- ss->css_free(css);
+ call_rcu(&css->rcu_head, css_free_rcu_fn);
return err;
}

--
1.9.0

2014-04-24 21:02:28

by Tejun Heo

Subject: [PATCH 2/6] cgroup, memcg: allocate cgroup ID from 1

Currently, cgroup->id is allocated from 0, which is always assigned to
the root cgroup; unfortunately, memcg wants to use ID 0 to indicate
invalid IDs and ends up incrementing all IDs by one.

It's reasonable to reserve 0 for special purposes. This patch updates
cgroup core so that ID 0 is not used and the root cgroups get ID 1.
The ID incrementing is removed from memcg.
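
The two idr_alloc() windows carry the change (start is inclusive, end is
exclusive, and end <= 0 means no upper limit), condensed from the diff:

	/* root cgroup: the [1, 2) window pins the root's ID to exactly 1 */
	ret = idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);

	/* child cgroups: lowest free ID starting from 2, no upper limit */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);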

Signed-off-by: Tejun Heo <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
---
include/linux/cgroup.h | 4 ++--
kernel/cgroup.c | 4 ++--
mm/memcontrol.c | 8 ++------
3 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c6c703f..793f70a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -144,8 +144,8 @@ struct cgroup {
/*
* idr allocated in-hierarchy ID.
*
- * The ID of the root cgroup is always 0, and a new cgroup
- * will be assigned with a smallest available ID.
+ * ID 0 is not used, the ID of the root cgroup is always 1, and a
+ * new cgroup will be assigned with a smallest available ID.
*
* Allocating/Removing ID must be protected by cgroup_mutex.
*/
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 21667f3..3fa0463 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1531,7 +1531,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);

- ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+ ret = idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
if (ret < 0)
goto out;
root_cgrp->id = ret;
@@ -4225,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
* Temporarily set the pointer to NULL, so idr_find() won't return
* a half-baked cgroup.
*/
- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
if (cgrp->id < 0) {
err = -ENOMEM;
goto err_unlock;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29501f0..1d0b297 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -527,18 +527,14 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- /*
- * The ID of the root cgroup is 0, but memcg treat 0 as an
- * invalid ID, so we return (cgroup_id + 1).
- */
- return memcg->css.cgroup->id + 1;
+ return memcg->css.cgroup->id;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
struct cgroup_subsys_state *css;

- css = css_from_id(id - 1, &memory_cgrp_subsys);
+ css = css_from_id(id, &memory_cgrp_subsys);
return mem_cgroup_from_css(css);
}

--
1.9.0

2014-04-24 21:06:49

by Tejun Heo

Subject: [PATCH 3/6] cgroup: protect cgroup_root->cgroup_idr with a spinlock

Currently, cgroup_root->cgroup_idr is protected by cgroup_mutex, which
ends up requiring cgroup_put() to be invoked from sleepable context.
This is okay for now but is an unusual requirement, and we'll soon add
css->id, which will have the same problem but won't be able to simply
grab cgroup_mutex as removal will have to happen from css_release(),
which can't sleep.

Introduce cgroup_idr_lock and idr_alloc/replace/remove() wrappers
which protect the idr operations with the lock, and use them for
cgroup_root->cgroup_idr. cgroup_put() no longer needs to grab
cgroup_mutex, and css_from_id() is updated to always require the RCU
read lock instead of either the RCU read lock or cgroup_mutex, which
doesn't affect the existing users.
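
For readers outside the tree, here is a minimal userspace analogue of the
pattern (illustrative only; all names are made up, and the real wrappers
in the diff below use the kernel idr API under cgroup_idr_lock):

	#include <pthread.h>
	#include <stdio.h>

	#define TABLE_SIZE 64

	static pthread_spinlock_t table_lock;
	static void *table[TABLE_SIZE];

	/* allocate the lowest free slot at or above @start; -1 if full */
	static int table_alloc(void *ptr, int start)
	{
		int id, ret = -1;

		pthread_spin_lock(&table_lock);
		for (id = start; id < TABLE_SIZE; id++) {
			if (!table[id]) {
				table[id] = ptr;
				ret = id;
				break;
			}
		}
		pthread_spin_unlock(&table_lock);
		return ret;
	}

	/* removal only needs the spinlock, never a sleeping mutex */
	static void table_remove(int id)
	{
		pthread_spin_lock(&table_lock);
		table[id] = NULL;
		pthread_spin_unlock(&table_lock);
	}

	int main(void)
	{
		static int root_obj;
		int id;

		pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
		id = table_alloc(&root_obj, 1);	/* ID 0 stays reserved */
		printf("allocated id %d\n", id);
		table_remove(id);
		pthread_spin_destroy(&table_lock);
		return 0;
	}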

Signed-off-by: Tejun Heo <[email protected]>
---
kernel/cgroup.c | 51 +++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 43 insertions(+), 8 deletions(-)

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3fa0463..7cb9c08 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -100,6 +100,12 @@ static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
+ * Protects cgroup_idr so that IDs can be released without grabbing
+ * cgroup_mutex.
+ */
+static DEFINE_SPINLOCK(cgroup_idr_lock);
+
+/*
* Protects cgroup_subsys->release_agent_path. Modifying it also requires
* cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
*/
@@ -190,6 +196,37 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
bool is_add);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);

+/* IDR wrappers which synchronize using cgroup_idr_lock */
+static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
+ gfp_t gfp_mask)
+{
+ int ret;
+
+ idr_preload(gfp_mask);
+ spin_lock(&cgroup_idr_lock);
+ ret = idr_alloc(idr, ptr, start, end, gfp_mask);
+ spin_unlock(&cgroup_idr_lock);
+ idr_preload_end();
+ return ret;
+}
+
+static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
+{
+ void *ret;
+
+ spin_lock(&cgroup_idr_lock);
+ ret = idr_replace(idr, ptr, id);
+ spin_unlock(&cgroup_idr_lock);
+ return ret;
+}
+
+static void cgroup_idr_remove(struct idr *idr, int id)
+{
+ spin_lock(&cgroup_idr_lock);
+ idr_remove(idr, id);
+ spin_unlock(&cgroup_idr_lock);
+}
+
/**
* cgroup_css - obtain a cgroup's css for the specified subsystem
* @cgrp: the cgroup of interest
@@ -1058,9 +1095,7 @@ static void cgroup_put(struct cgroup *cgrp)
* per-subsystem and moved to css->id so that lookups are
* successful until the target css is released.
*/
- mutex_lock(&cgroup_mutex);
- idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
- mutex_unlock(&cgroup_mutex);
+ cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
cgrp->id = -1;

call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1531,7 +1566,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
lockdep_assert_held(&cgroup_tree_mutex);
lockdep_assert_held(&cgroup_mutex);

- ret = idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
+ ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
if (ret < 0)
goto out;
root_cgrp->id = ret;
@@ -4225,7 +4260,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
* Temporarily set the pointer to NULL, so idr_find() won't return
* a half-baked cgroup.
*/
- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
+ cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
if (cgrp->id < 0) {
err = -ENOMEM;
goto err_unlock;
@@ -4268,7 +4303,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
* @cgrp is now fully operational. If something fails after this
* point, it'll be released via the normal destruction path.
*/
- idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+ cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

err = cgroup_kn_set_ugid(kn);
if (err)
@@ -4302,7 +4337,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
return 0;

err_free_id:
- idr_remove(&root->cgroup_idr, cgrp->id);
+ cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
err_unlock:
mutex_unlock(&cgroup_mutex);
err_unlock_tree:
@@ -5162,7 +5197,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
struct cgroup *cgrp;

- cgroup_assert_mutexes_or_rcu_locked();
+ WARN_ON_ONCE(!rcu_read_lock_held());

cgrp = idr_find(&ss->root->cgroup_idr, id);
if (cgrp)
--
1.9.0

2014-04-28 03:33:35

by Zefan Li

Subject: Re: [PATCH 6/6] cgroup, memcg: implement css->id and convert css_from_id() to use it

On 2014/4/25 5:02, Tejun Heo wrote:
> Until now, cgroup->id has been used to identify all the associated
> csses and css_from_id() takes cgroup ID and returns the matching css
> by looking up the cgroup and then dereferencing the css associated
> with it; however, now that the lifetimes of cgroup and css are
> separate, this is incorrect and breaks on the unified hierarchy when a
> controller is disabled and enabled back again before the previous
> instance is released.
>
> This patch adds css->id which is a subsystem-unique ID and converts
> css_from_id() to look up by the new css->id instead. memcg is the
> only user of css_from_id() and also converted to use css->id instead.
>

netprio_cgroup also needs to be updated.

> For traditional hierarchies, this shouldn't make any functional
> difference.
>
> Signed-off-by: Tejun Heo <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Jianyu Zhan <[email protected]>
> ---
> include/linux/cgroup.h | 9 ++++++++
> kernel/cgroup.c | 59 ++++++++++++++++++++++++++++++++------------------
> mm/memcontrol.c | 4 ++--
> 3 files changed, 49 insertions(+), 23 deletions(-)
>

2014-04-30 13:11:10

by Michal Hocko

Subject: Re: [PATCH 2/6] cgroup, memcg: allocate cgroup ID from 1

On Thu 24-04-14 17:02:09, Tejun Heo wrote:
> Currently, cgroup->id is allocated from 0, which is always assigned to
> the root cgroup; unfortunately, memcg wants to use ID 0 to indicate
> invalid IDs and ends up incrementing all IDs by one.
>
> It's reasonable to reserve 0 for special purposes. This patch updates
> cgroup core so that ID 0 is not used and the root cgroups get ID 1.
> The ID incrementing is removed form memcg.
>
> Signed-off-by: Tejun Heo <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>

Sorry for the late response.
Thanks for removing this hack from memcg!
The cgroup code change looks good at first glance, but I am not familiar
with all the recent changes, so I do not feel comfortable adding my
Reviewed-by.

Anyway
Acked-by: Michal Hocko <[email protected]>

for the semantic change and the memcg change.

Thanks!

> ---
> include/linux/cgroup.h | 4 ++--
> kernel/cgroup.c | 4 ++--
> mm/memcontrol.c | 8 ++------
> 3 files changed, 6 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
> index c6c703f..793f70a 100644
> --- a/include/linux/cgroup.h
> +++ b/include/linux/cgroup.h
> @@ -144,8 +144,8 @@ struct cgroup {
> /*
> * idr allocated in-hierarchy ID.
> *
> - * The ID of the root cgroup is always 0, and a new cgroup
> - * will be assigned with a smallest available ID.
> + * ID 0 is not used, the ID of the root cgroup is always 1, and a
> + * new cgroup will be assigned with a smallest available ID.
> *
> * Allocating/Removing ID must be protected by cgroup_mutex.
> */
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index 21667f3..3fa0463 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -1531,7 +1531,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
> lockdep_assert_held(&cgroup_tree_mutex);
> lockdep_assert_held(&cgroup_mutex);
>
> - ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
> + ret = idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
> if (ret < 0)
> goto out;
> root_cgrp->id = ret;
> @@ -4225,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
> * Temporarily set the pointer to NULL, so idr_find() won't return
> * a half-baked cgroup.
> */
> - cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
> + cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
> if (cgrp->id < 0) {
> err = -ENOMEM;
> goto err_unlock;
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 29501f0..1d0b297 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -527,18 +527,14 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
>
> static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
> {
> - /*
> - * The ID of the root cgroup is 0, but memcg treat 0 as an
> - * invalid ID, so we return (cgroup_id + 1).
> - */
> - return memcg->css.cgroup->id + 1;
> + return memcg->css.cgroup->id;
> }
>
> static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
> {
> struct cgroup_subsys_state *css;
>
> - css = css_from_id(id - 1, &memory_cgrp_subsys);
> + css = css_from_id(id, &memory_cgrp_subsys);
> return mem_cgroup_from_css(css);
> }
>
> --
> 1.9.0
>

--
Michal Hocko
SUSE Labs

2014-04-30 13:24:55

by Michal Hocko

Subject: Re: [PATCH 6/6] cgroup, memcg: implement css->id and convert css_from_id() to use it

On Thu 24-04-14 17:02:13, Tejun Heo wrote:
> Until now, cgroup->id has been used to identify all the associated
> csses and css_from_id() takes cgroup ID and returns the matching css
> by looking up the cgroup and then dereferencing the css associated
> with it; however, now that the lifetimes of cgroup and css are
> separate, this is incorrect and breaks on the unified hierarchy when a
> controller is disabled and enabled back again before the previous
> instance is released.
>
> This patch adds css->id which is a subsystem-unique ID and converts
> css_from_id() to look up by the new css->id instead. memcg is the
> only user of css_from_id() and also converted to use css->id instead.
>
> For traditional hierarchies, this shouldn't make any functional
> difference.
>
> Signed-off-by: Tejun Heo <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Jianyu Zhan <[email protected]>

Looks good to me
Acked-by: Michal Hocko <[email protected]>

> ---
> include/linux/cgroup.h | 9 ++++++++
> kernel/cgroup.c | 59 ++++++++++++++++++++++++++++++++------------------
> mm/memcontrol.c | 4 ++--
> 3 files changed, 49 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
> index 793f70a..2dfabb3 100644
> --- a/include/linux/cgroup.h
> +++ b/include/linux/cgroup.h
> @@ -62,6 +62,12 @@ struct cgroup_subsys_state {
> /* the parent css */
> struct cgroup_subsys_state *parent;
>
> + /*
> + * Subsys-unique ID. 0 is unused and root is always 1. The
> + * matching css can be looked up using css_from_id().
> + */
> + int id;
> +
> unsigned int flags;
>
> /* percpu_ref killing and RCU release */
> @@ -655,6 +661,9 @@ struct cgroup_subsys {
> /* link to parent, protected by cgroup_lock() */
> struct cgroup_root *root;
>
> + /* idr for css->id */
> + struct idr css_idr;
> +
> /*
> * List of cftypes. Each entry is the first entry of an array
> * terminated by zero length name.
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index f1c98c5..a1a20e8 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -100,8 +100,8 @@ static DECLARE_RWSEM(css_set_rwsem);
> #endif
>
> /*
> - * Protects cgroup_idr so that IDs can be released without grabbing
> - * cgroup_mutex.
> + * Protects cgroup_idr and css_idr so that IDs can be released without
> + * grabbing cgroup_mutex.
> */
> static DEFINE_SPINLOCK(cgroup_idr_lock);
>
> @@ -1089,12 +1089,6 @@ static void cgroup_put(struct cgroup *cgrp)
> if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
> return;
>
> - /*
> - * XXX: cgrp->id is only used to look up css's. As cgroup and
> - * css's lifetimes will be decoupled, it should be made
> - * per-subsystem and moved to css->id so that lookups are
> - * successful until the target css is released.
> - */
> cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
> cgrp->id = -1;
>
> @@ -4104,8 +4098,11 @@ static void css_release(struct percpu_ref *ref)
> {
> struct cgroup_subsys_state *css =
> container_of(ref, struct cgroup_subsys_state, refcnt);
> + struct cgroup_subsys *ss = css->ss;
> +
> + RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
> + cgroup_idr_remove(&ss->css_idr, css->id);
>
> - RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL);
> call_rcu(&css->rcu_head, css_free_rcu_fn);
> }
>
> @@ -4195,9 +4192,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
> if (err)
> goto err_free_css;
>
> + err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
> + if (err < 0)
> + goto err_free_percpu_ref;
> + css->id = err;
> +
> err = cgroup_populate_dir(cgrp, 1 << ss->id);
> if (err)
> - goto err_free_percpu_ref;
> + goto err_free_id;
> +
> + /* @css is ready to be brought online now, make it visible */
> + cgroup_idr_replace(&ss->css_idr, css, css->id);
>
> err = online_css(css);
> if (err)
> @@ -4216,6 +4221,8 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
>
> err_clear_dir:
> cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
> +err_free_id:
> + cgroup_idr_remove(&ss->css_idr, css->id);
> err_free_percpu_ref:
> percpu_ref_cancel_init(&css->refcnt);
> err_free_css:
> @@ -4642,7 +4649,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
> .rename = cgroup_rename,
> };
>
> -static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
> +static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
> {
> struct cgroup_subsys_state *css;
>
> @@ -4651,6 +4658,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
> mutex_lock(&cgroup_tree_mutex);
> mutex_lock(&cgroup_mutex);
>
> + idr_init(&ss->css_idr);
> INIT_LIST_HEAD(&ss->cfts);
>
> /* Create the root cgroup state for this subsystem */
> @@ -4659,6 +4667,13 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
> /* We don't handle early failures gracefully */
> BUG_ON(IS_ERR(css));
> init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
> + if (early) {
> + /* idr_alloc() can't be called safely during early init */
> + css->id = 1;
> + } else {
> + css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
> + BUG_ON(css->id < 0);
> + }
>
> /* Update the init_css_set to contain a subsys
> * pointer to this state - since the subsystem is
> @@ -4709,7 +4724,7 @@ int __init cgroup_init_early(void)
> ss->name = cgroup_subsys_name[i];
>
> if (ss->early_init)
> - cgroup_init_subsys(ss);
> + cgroup_init_subsys(ss, true);
> }
> return 0;
> }
> @@ -4741,8 +4756,16 @@ int __init cgroup_init(void)
> mutex_unlock(&cgroup_tree_mutex);
>
> for_each_subsys(ss, ssid) {
> - if (!ss->early_init)
> - cgroup_init_subsys(ss);
> + if (ss->early_init) {
> + struct cgroup_subsys_state *css =
> + init_css_set.subsys[ss->id];
> +
> + css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
> + GFP_KERNEL);
> + BUG_ON(css->id < 0);
> + } else {
> + cgroup_init_subsys(ss, false);
> + }
>
> list_add_tail(&init_css_set.e_cset_node[ssid],
> &cgrp_dfl_root.cgrp.e_csets[ssid]);
> @@ -5196,14 +5219,8 @@ struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
> */
> struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
> {
> - struct cgroup *cgrp;
> -
> WARN_ON_ONCE(!rcu_read_lock_held());
> -
> - cgrp = idr_find(&ss->root->cgroup_idr, id);
> - if (cgrp)
> - return cgroup_css(cgrp, ss);
> - return NULL;
> + return idr_find(&ss->css_idr, id);
> }
>
> #ifdef CONFIG_CGROUP_DEBUG
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 1d0b297..c3f82f6 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -527,7 +527,7 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
>
> static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
> {
> - return memcg->css.cgroup->id;
> + return memcg->css.id;
> }
>
> static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
> @@ -6401,7 +6401,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
> struct mem_cgroup *memcg = mem_cgroup_from_css(css);
> struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
>
> - if (css->cgroup->id > MEM_CGROUP_ID_MAX)
> + if (css->id > MEM_CGROUP_ID_MAX)
> return -ENOSPC;
>
> if (!parent)
> --
> 1.9.0
>

--
Michal Hocko
SUSE Labs