zcache_do_preload uses ZCACHE_GFP_MASK to allocate memory that may sleep,
but zcache_do_preload is called in zcache_put_page where IRQs are disabled.
Fix it by using the GFP_ATOMIC flag.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 5 -----
1 files changed, 0 insertions(+), 5 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 03f690b..d215fb4 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -45,13 +45,8 @@
#include <linux/frontswap.h>
#endif
-#if 0
/* this is more aggressive but may cause other problems? */
#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
-#else
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
-#endif
#define MAX_POOLS_PER_CLIENT 16
--
1.7.7.6
fix:
drivers/staging/zcache/zcache-main.c: In function ‘zcache_comp_op’:
drivers/staging/zcache/zcache-main.c:112:2: warning: ‘ret’ may be used uninitial
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 32fe0ba..74a3ac8 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -93,7 +93,7 @@ static inline int zcache_comp_op(enum comp_op op,
u8 *dst, unsigned int *dlen)
{
struct crypto_comp *tfm;
- int ret;
+ int ret = -1;
BUG_ON(!zcache_comp_pcpu_tfms);
tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
--
1.7.7.6
In zcache_get_pool_by_id, the refcount of zcache_host is not increased, but
it is always decreased in zcache_put_pool.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index d215fb4..32fe0ba 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -942,8 +942,9 @@ static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
cli = &zcache_clients[cli_id];
if (cli == NULL)
goto out;
- atomic_inc(&cli->refcount);
}
+
+ atomic_inc(&cli->refcount);
if (poolid < MAX_POOLS_PER_CLIENT) {
pool = cli->tmem_pools[poolid];
if (pool != NULL)
--
1.7.7.6
zcache is enabled only if one of CONFIG_CLEANCACHE and CONFIG_FRONTSWAP is
enabled, see the Kconfig:
depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
So, we can remove the check in the source code
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 7 ++-----
1 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 74a3ac8..82d752d 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -35,9 +35,6 @@
#include "../zsmalloc/zsmalloc.h"
-#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
-#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
-#endif
#ifdef CONFIG_CLEANCACHE
#include <linux/cleancache.h>
#endif
@@ -2017,7 +2014,7 @@ static int __init zcache_init(void)
goto out;
}
#endif /* CONFIG_SYSFS */
-#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
+
if (zcache_enabled) {
unsigned int cpu;
@@ -2048,7 +2045,7 @@ static int __init zcache_init(void)
pr_err("zcache: can't create client\n");
goto out;
}
-#endif
+
#ifdef CONFIG_CLEANCACHE
if (zcache_enabled && use_cleancache) {
struct cleancache_ops old_ops;
--
1.7.7.6
These functions are called only while the system is initializing, so mark
them __init to allow their memory to be freed after initialization.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 82d752d..d194303 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -594,7 +594,7 @@ out:
return;
}
-static void zbud_init(void)
+static void __init zbud_init(void)
{
int i;
@@ -1974,7 +1974,7 @@ static int __init enable_zcache_compressor(char *s)
__setup("zcache=", enable_zcache_compressor);
-static int zcache_comp_init(void)
+static int __init zcache_comp_init(void)
{
int ret = 0;
--
1.7.7.6
zcache_do_preload is called in zcache_put_page where IRQs are disabled, so
there is no need to worry about preemption.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 21 +++++++++------------
1 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index c18130f..386906d 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1026,45 +1026,43 @@ static int zcache_do_preload(struct tmem_pool *pool)
goto out;
if (unlikely(zcache_obj_cache == NULL))
goto out;
- preempt_disable();
+
+ /* IRQ has already been disabled. */
kp = &__get_cpu_var(zcache_preloads);
while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
- preempt_enable_no_resched();
objnode = kmem_cache_alloc(zcache_objnode_cache,
ZCACHE_GFP_MASK);
if (unlikely(objnode == NULL)) {
zcache_failed_alloc++;
goto out;
}
- preempt_disable();
- kp = &__get_cpu_var(zcache_preloads);
- if (kp->nr < ARRAY_SIZE(kp->objnodes))
- kp->objnodes[kp->nr++] = objnode;
- else
- kmem_cache_free(zcache_objnode_cache, objnode);
+
+ kp->objnodes[kp->nr++] = objnode;
}
- preempt_enable_no_resched();
+
obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
if (unlikely(obj == NULL)) {
zcache_failed_alloc++;
goto out;
}
+
page = (void *)__get_free_page(ZCACHE_GFP_MASK);
if (unlikely(page == NULL)) {
zcache_failed_get_free_pages++;
kmem_cache_free(zcache_obj_cache, obj);
goto out;
}
- preempt_disable();
- kp = &__get_cpu_var(zcache_preloads);
+
if (kp->obj == NULL)
kp->obj = obj;
else
kmem_cache_free(zcache_obj_cache, obj);
+
if (kp->page == NULL)
kp->page = page;
else
free_page((unsigned long)page);
+
ret = 0;
out:
return ret;
@@ -1575,7 +1573,6 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
zcache_failed_pers_puts++;
}
zcache_put_pool(pool);
- preempt_enable_no_resched();
} else {
zcache_put_to_flush++;
if (atomic_read(&pool->obj_count) > 0)
--
1.7.7.6
There is no need to set global variables to 0; they are zero-initialized by default.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 6 ++----
1 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index d194303..c18130f 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -599,11 +599,9 @@ static void __init zbud_init(void)
int i;
INIT_LIST_HEAD(&zbud_buddied_list);
- zcache_zbud_buddied_count = 0;
- for (i = 0; i < NCHUNKS; i++) {
+
+ for (i = 0; i < NCHUNKS; i++)
INIT_LIST_HEAD(&zbud_unbuddied[i].list);
- zbud_unbuddied[i].count = 0;
- }
}
#ifdef CONFIG_SYSFS
--
1.7.7.6
Cleanup the code for zcache_do_preload and zcache_put_page
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 37 ++++++++++++++-------------------
1 files changed, 16 insertions(+), 21 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 386906d..2ee55c4 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1040,29 +1040,24 @@ static int zcache_do_preload(struct tmem_pool *pool)
kp->objnodes[kp->nr++] = objnode;
}
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- if (unlikely(obj == NULL)) {
- zcache_failed_alloc++;
- goto out;
+ if (!kp->obj) {
+ obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
+ if (unlikely(obj == NULL)) {
+ zcache_failed_alloc++;
+ goto out;
+ }
+ kp->obj = obj;
}
- page = (void *)__get_free_page(ZCACHE_GFP_MASK);
- if (unlikely(page == NULL)) {
- zcache_failed_get_free_pages++;
- kmem_cache_free(zcache_obj_cache, obj);
- goto out;
+ if (!kp->page) {
+ page = (void *)__get_free_page(ZCACHE_GFP_MASK);
+ if (unlikely(page == NULL)) {
+ zcache_failed_get_free_pages++;
+ goto out;
+ }
+ kp->page = page;
}
- if (kp->obj == NULL)
- kp->obj = obj;
- else
- kmem_cache_free(zcache_obj_cache, obj);
-
- if (kp->page == NULL)
- kp->page = page;
- else
- free_page((unsigned long)page);
-
ret = 0;
out:
return ret;
@@ -1572,14 +1567,14 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
else
zcache_failed_pers_puts++;
}
- zcache_put_pool(pool);
} else {
zcache_put_to_flush++;
if (atomic_read(&pool->obj_count) > 0)
/* the put fails whether the flush succeeds or not */
(void)tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
}
+
+ zcache_put_pool(pool);
out:
return ret;
}
--
1.7.7.6
Introduce get_zcache_client to remove the common code
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/zcache-main.c | 46 +++++++++++++++++-----------------
1 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2ee55c4..542f181 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -70,6 +70,17 @@ static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
return cli - &zcache_clients[0];
}
+static struct zcache_client *get_zcache_client(uint16_t cli_id)
+{
+ if (cli_id == LOCAL_CLIENT)
+ return &zcache_host;
+
+ if ((unsigned int)cli_id < MAX_CLIENTS)
+ return &zcache_clients[cli_id];
+
+ return NULL;
+}
+
static inline bool is_local_client(struct zcache_client *cli)
{
return cli == &zcache_host;
@@ -929,15 +940,9 @@ static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
struct tmem_pool *pool = NULL;
struct zcache_client *cli = NULL;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else {
- if (cli_id >= MAX_CLIENTS)
- goto out;
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- }
+ cli = get_zcache_client(cli_id);
+ if (!cli)
+ goto out;
atomic_inc(&cli->refcount);
if (poolid < MAX_POOLS_PER_CLIENT) {
@@ -962,13 +967,11 @@ static void zcache_put_pool(struct tmem_pool *pool)
int zcache_new_client(uint16_t cli_id)
{
- struct zcache_client *cli = NULL;
+ struct zcache_client *cli;
int ret = -1;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
+ cli = get_zcache_client(cli_id);
+
if (cli == NULL)
goto out;
if (cli->allocated)
@@ -1644,17 +1647,16 @@ static int zcache_flush_object(int cli_id, int pool_id,
static int zcache_destroy_pool(int cli_id, int pool_id)
{
struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
+ struct zcache_client *cli;
int ret = -1;
if (pool_id < 0)
goto out;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
+
+ cli = get_zcache_client(cli_id);
if (cli == NULL)
goto out;
+
atomic_inc(&cli->refcount);
pool = cli->tmem_pools[pool_id];
if (pool == NULL)
@@ -1680,12 +1682,10 @@ static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
struct tmem_pool *pool;
struct zcache_client *cli = NULL;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
+ cli = get_zcache_client(cli_id);
if (cli == NULL)
goto out;
+
atomic_inc(&cli->refcount);
pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
if (pool == NULL) {
--
1.7.7.6
tmem_obj_find and tmem-obj insertion share the same logic; we can
consolidate the code.
Signed-off-by: Xiao Guangrong <[email protected]>
---
drivers/staging/zcache/tmem.c | 58 +++++++++++++++++++++-------------------
1 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 1ca66ea..cdf2d3c 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -72,33 +72,48 @@ void tmem_register_pamops(struct tmem_pamops *m)
* the hashbucket lock must be held.
*/
-/* searches for object==oid in pool, returns locked object if found */
-static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp)
+static struct tmem_obj
+*__tmem_obj_find(struct tmem_hashbucket*hb, struct tmem_oid *oidp,
+ struct rb_node *parent, struct rb_node **link)
{
- struct rb_node *rbnode;
+ struct rb_node **rbnode;
struct tmem_obj *obj;
- rbnode = hb->obj_rb_root.rb_node;
- while (rbnode) {
- BUG_ON(RB_EMPTY_NODE(rbnode));
- obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
+ rbnode = &hb->obj_rb_root.rb_node;
+ while (*rbnode) {
+ BUG_ON(RB_EMPTY_NODE(*rbnode));
+ obj = rb_entry(*rbnode, struct tmem_obj,
+ rb_tree_node);
switch (tmem_oid_compare(oidp, &obj->oid)) {
case 0: /* equal */
goto out;
case -1:
- rbnode = rbnode->rb_left;
+ rbnode = &(*rbnode)->rb_left;
break;
case 1:
- rbnode = rbnode->rb_right;
+ rbnode = &(*rbnode)->rb_right;
break;
}
}
+
+ if (parent)
+ parent = &obj->rb_tree_node;
+ if (link)
+ link = rbnode;
+
obj = NULL;
out:
return obj;
}
+
+/* searches for object==oid in pool, returns locked object if found */
+static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
+ struct tmem_oid *oidp)
+{
+ return __tmem_obj_find(hb, oidp, NULL, NULL);
+}
+
static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
/* free an object that has no more pampds in it */
@@ -131,8 +146,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
struct tmem_oid *oidp)
{
struct rb_root *root = &hb->obj_rb_root;
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- struct tmem_obj *this;
+ struct rb_node **new = NULL, *parent = NULL;
BUG_ON(pool == NULL);
atomic_inc(&pool->obj_count);
@@ -144,22 +158,10 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
obj->pampd_count = 0;
(*tmem_pamops.new_obj)(obj);
SET_SENTINEL(obj, OBJ);
- while (*new) {
- BUG_ON(RB_EMPTY_NODE(*new));
- this = rb_entry(*new, struct tmem_obj, rb_tree_node);
- parent = *new;
- switch (tmem_oid_compare(oidp, &this->oid)) {
- case 0:
- BUG(); /* already present; should never happen! */
- break;
- case -1:
- new = &(*new)->rb_left;
- break;
- case 1:
- new = &(*new)->rb_right;
- break;
- }
- }
+
+ if (__tmem_obj_find(hb, oidp, parent, new))
+ BUG();
+
rb_link_node(&obj->rb_tree_node, parent, new);
rb_insert_color(&obj->rb_tree_node, root);
}
--
1.7.7.6
On 06/19/2012 03:32 AM, Xiao Guangrong wrote:
> zcache_do_preload uses ZCACHE_GFP_MASK to allocate memory that will be sleep,
> but zcache_do_preload is called in zcache_put_page where IRQ is disabled
>
> Fix it by use GFP_ATOMIC flag
Did you get a might_sleep warning on this? I haven't seen this being an
issue.
GFP_ATOMIC only modifies the existing mask to allow allocation use the
emergency pool. It is __GFP_WAIT not being set that prevents sleep. We
don't want to use the emergency pool since we make large, long lived
allocations with this mask.
--
Seth
On 06/19/2012 03:33 AM, Xiao Guangrong wrote:
> In zcache_get_pool_by_id, the refcount of zcache_host is not increased, but
> it is always decreased in zcache_put_pool
>
> Signed-off-by: Xiao Guangrong <[email protected]>
Acked-by: Seth Jennings <[email protected]>
--
Seth
On 06/19/2012 03:33 AM, Xiao Guangrong wrote:
> fix:
>
> drivers/staging/zcache/zcache-main.c: In function ‘zcache_comp_op’:
> drivers/staging/zcache/zcache-main.c:112:2: warning: ‘ret’ may be used uninitial
>
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
> drivers/staging/zcache/zcache-main.c | 2 +-
> 1 files changed, 1 insertions(+), 1 deletions(-)
>
> diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
> index 32fe0ba..74a3ac8 100644
> --- a/drivers/staging/zcache/zcache-main.c
> +++ b/drivers/staging/zcache/zcache-main.c
> @@ -93,7 +93,7 @@ static inline int zcache_comp_op(enum comp_op op,
> u8 *dst, unsigned int *dlen)
> {
> struct crypto_comp *tfm;
> - int ret;
> + int ret = -1;
>
> BUG_ON(!zcache_comp_pcpu_tfms);
> tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
What about adding a default case in the switch like this?
default:
ret = -EINVAL;
That way we don't assign ret twice.
--
Seth
On 06/19/2012 03:34 AM, Xiao Guangrong wrote:
> zcache is enabled only if one of CONFIG_CLEANCACHE and CONFIG_FRONTSWAP is
> enabled, see the Kconfig:
> depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
> So, we can remove the check in the source code
>
> Signed-off-by: Xiao Guangrong <[email protected]>
Acked-by: Seth Jennings <[email protected]>
--
Seth
On 06/19/2012 03:35 AM, Xiao Guangrong wrote:
> These functions are called only when system is initializing, so mark __init
> for them to free memory
>
> Signed-off-by: Xiao Guangrong <[email protected]>
For patches 05-09:
Acked-by: Seth Jennings <[email protected]>
--
Seth
This patch causes a crash, details below.
On 06/19/2012 03:37 AM, Xiao Guangrong wrote:
> tmem_obj_find and insertion tmem-obj have the some logic, we can integrate
> the code
>
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
> drivers/staging/zcache/tmem.c | 58 +++++++++++++++++++++-------------------
> 1 files changed, 30 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
> index 1ca66ea..cdf2d3c 100644
> --- a/drivers/staging/zcache/tmem.c
> +++ b/drivers/staging/zcache/tmem.c
> @@ -72,33 +72,48 @@ void tmem_register_pamops(struct tmem_pamops *m)
> * the hashbucket lock must be held.
> */
>
> -/* searches for object==oid in pool, returns locked object if found */
> -static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
> - struct tmem_oid *oidp)
> +static struct tmem_obj
> +*__tmem_obj_find(struct tmem_hashbucket*hb, struct tmem_oid *oidp,
> + struct rb_node *parent, struct rb_node **link)
> {
> - struct rb_node *rbnode;
> + struct rb_node **rbnode;
> struct tmem_obj *obj;
>
> - rbnode = hb->obj_rb_root.rb_node;
> - while (rbnode) {
> - BUG_ON(RB_EMPTY_NODE(rbnode));
> - obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
> + rbnode = &hb->obj_rb_root.rb_node;
> + while (*rbnode) {
> + BUG_ON(RB_EMPTY_NODE(*rbnode));
> + obj = rb_entry(*rbnode, struct tmem_obj,
> + rb_tree_node);
> switch (tmem_oid_compare(oidp, &obj->oid)) {
> case 0: /* equal */
> goto out;
> case -1:
> - rbnode = rbnode->rb_left;
> + rbnode = &(*rbnode)->rb_left;
> break;
> case 1:
> - rbnode = rbnode->rb_right;
> + rbnode = &(*rbnode)->rb_right;
> break;
> }
> }
> +
> + if (parent)
> + parent = &obj->rb_tree_node;
> + if (link)
> + link = rbnode;
> +
> obj = NULL;
> out:
> return obj;
> }
>
> +
> +/* searches for object==oid in pool, returns locked object if found */
> +static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
> + struct tmem_oid *oidp)
> +{
> + return __tmem_obj_find(hb, oidp, NULL, NULL);
> +}
> +
> static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
>
> /* free an object that has no more pampds in it */
> @@ -131,8 +146,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
> struct tmem_oid *oidp)
> {
> struct rb_root *root = &hb->obj_rb_root;
> - struct rb_node **new = &(root->rb_node), *parent = NULL;
> - struct tmem_obj *this;
> + struct rb_node **new = NULL, *parent = NULL;
>
> BUG_ON(pool == NULL);
> atomic_inc(&pool->obj_count);
> @@ -144,22 +158,10 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
> obj->pampd_count = 0;
> (*tmem_pamops.new_obj)(obj);
> SET_SENTINEL(obj, OBJ);
> - while (*new) {
> - BUG_ON(RB_EMPTY_NODE(*new));
> - this = rb_entry(*new, struct tmem_obj, rb_tree_node);
> - parent = *new;
> - switch (tmem_oid_compare(oidp, &this->oid)) {
> - case 0:
> - BUG(); /* already present; should never happen! */
> - break;
> - case -1:
> - new = &(*new)->rb_left;
> - break;
> - case 1:
> - new = &(*new)->rb_right;
> - break;
> - }
> - }
> +
> + if (__tmem_obj_find(hb, oidp, parent, new))
> + BUG();
> +
> rb_link_node(&obj->rb_tree_node, parent, new);
Getting a NULL deref crash here because new is NULL
[ 56.422031] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 56.423008] IP: [<ffffffff812b8ba4>] tmem_put+0x3a4/0x3d0
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
...
*rb_link = node;
ffffffff812b8ba4: 48 89 38 mov %rdi,(%rax) <--- here
ffffffff812b8ba7: e8 00 00 00 00 callq ffffffff812b8bac <tmem_put+0x3ac>
--
Seth
> From: Seth Jennings [mailto:[email protected]]
> Sent: Tuesday, June 19, 2012 8:29 AM
> To: Xiao Guangrong
> Cc: Andrew Morton; Dan Magenheimer; LKML; [email protected]
> Subject: Re: [PATCH 02/10] zcache: fix refcount leak
>
> On 06/19/2012 03:33 AM, Xiao Guangrong wrote:
>
> > In zcache_get_pool_by_id, the refcount of zcache_host is not increased, but
> > it is always decreased in zcache_put_pool
> >
> > Signed-off-by: Xiao Guangrong <[email protected]>
>
> Acked-by: Seth Jennings <[email protected]>
(Nitin Gupta and Konrad Wilk cc'ed to call their attention
to this patch sequence...)
My preference would be to fix it the opposite way, by
checking and ignoring zcache_host in zcache_put_pool.
The ref-counting is to ensure that a client isn't
accidentally destroyed while in use (for multiple-client
users such as ramster and kvm) and since zcache_host is a static
struct, it should never be deleted so need not be ref-counted.
Dan
On 06/19/2012 02:49 PM, Dan Magenheimer wrote:
> My preference would be to fix it the opposite way, by
> checking and ignoring zcache_host in zcache_put_pool.
> The ref-counting is to ensure that a client isn't
> accidentally destroyed while in use (for multiple-client
> users such as ramster and kvm) and since zcache_host is a static
> struct, it should never be deleted so need not be ref-counted.
If we do that, we'll need to comment it. If we don't, it won't be
obvious why we are refcounting every zcache client except one. It'll
look like a bug.
--
Seth
On 06/19/2012 10:26 PM, Seth Jennings wrote:
> Did you get a might_sleep warning on this? I haven't seen this being an
> issue.
>
No, I did not; I found it just from code review.
> GFP_ATOMIC only modifies the existing mask to allow allocation use the
> emergency pool. It is __GFP_WAIT not being set that prevents sleep. We
> don't want to use the emergency pool since we make large, long lived
> allocations with this mask.
>
Ah, yes, I thought only GFP_ATOMIC could prevent sleeping; thank you very much
for pointing that out.
On 06/20/2012 12:49 AM, Seth Jennings wrote:
> This patch causes a crash, details below.
Sorry, I forgot to commit the new changes to my local git tree and
posted an old patch to the mailing list. :(
On 06/20/2012 04:06 AM, Seth Jennings wrote:
> On 06/19/2012 02:49 PM, Dan Magenheimer wrote:
>
>> My preference would be to fix it the opposite way, by
>> checking and ignoring zcache_host in zcache_put_pool.
>> The ref-counting is to ensure that a client isn't
>> accidentally destroyed while in use (for multiple-client
>> users such as ramster and kvm) and since zcache_host is a static
>> struct, it should never be deleted so need not be ref-counted.
>
>
> If we do that, we'll need to comment it. If we don't, it won't be
> obvious why we are refcounting every zcache client except one. It'll
> look like a bug.
Okay, I will fix it Dan's way and add a comment.
On 06/19/2012 10:30 PM, Seth Jennings wrote:
> On 06/19/2012 03:33 AM, Xiao Guangrong wrote:
>
>> fix:
>>
>> drivers/staging/zcache/zcache-main.c: In function ‘zcache_comp_op’:
>> drivers/staging/zcache/zcache-main.c:112:2: warning: ‘ret’ may be used uninitial
>>
>> Signed-off-by: Xiao Guangrong <[email protected]>
>> ---
>> drivers/staging/zcache/zcache-main.c | 2 +-
>> 1 files changed, 1 insertions(+), 1 deletions(-)
>>
>> diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
>> index 32fe0ba..74a3ac8 100644
>> --- a/drivers/staging/zcache/zcache-main.c
>> +++ b/drivers/staging/zcache/zcache-main.c
>> @@ -93,7 +93,7 @@ static inline int zcache_comp_op(enum comp_op op,
>> u8 *dst, unsigned int *dlen)
>> {
>> struct crypto_comp *tfm;
>> - int ret;
>> + int ret = -1;
>>
>> BUG_ON(!zcache_comp_pcpu_tfms);
>> tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
>
>
> What about adding a default case in the switch like this?
>
> default:
> ret = -EINVAL;
>
> That way we don't assign ret twice.
Okay, will do it in the next version. Thanks for your review, Seth!
On 06/20/2012 10:54 AM, Xiao Guangrong wrote:
> On 06/20/2012 04:06 AM, Seth Jennings wrote:
>
>> On 06/19/2012 02:49 PM, Dan Magenheimer wrote:
>>
>>> My preference would be to fix it the opposite way, by
>>> checking and ignoring zcache_host in zcache_put_pool.
>>> The ref-counting is to ensure that a client isn't
>>> accidentally destroyed while in use (for multiple-client
>>> users such as ramster and kvm) and since zcache_host is a static
>>> struct, it should never be deleted so need not be ref-counted.
>>
>>
>> If we do that, we'll need to comment it. If we don't, it won't be
>> obvious why we are refcounting every zcache client except one. It'll
>> look like a bug.
>
>
> Okay, i will fix it like Dan's way and comment it.
Hmm... But I notice that zcache_host is the same as the other clients; all
of them are static structs:
| static struct zcache_client zcache_host;
| static struct zcache_client zcache_clients[MAX_CLIENTS];
And none of them are ever destroyed.
> From: Xiao Guangrong [mailto:[email protected]]
> Subject: Re: [PATCH 02/10] zcache: fix refcount leak
>
> On 06/20/2012 10:54 AM, Xiao Guangrong wrote:
>
> > On 06/20/2012 04:06 AM, Seth Jennings wrote:
> >
> >> On 06/19/2012 02:49 PM, Dan Magenheimer wrote:
> >>
> >>> My preference would be to fix it the opposite way, by
> >>> checking and ignoring zcache_host in zcache_put_pool.
> >>> The ref-counting is to ensure that a client isn't
> >>> accidentally destroyed while in use (for multiple-client
> >>> users such as ramster and kvm) and since zcache_host is a static
> >>> struct, it should never be deleted so need not be ref-counted.
> >>
> >>
> >> If we do that, we'll need to comment it. If we don't, it won't be
> >> obvious why we are refcounting every zcache client except one. It'll
> >> look like a bug.
> >
> >
> > Okay, i will fix it like Dan's way and comment it.
>
> Hmm...But i notice that zcache_host is the same as other clients, all
> of them are static struct:
>
> | static struct zcache_client zcache_host;
> | static struct zcache_client zcache_clients[MAX_CLIENTS];
>
> And all of them are not destroyed.
Yes, the code currently in zcache was a first step towards
supporting multiple clients. Ramster goes one step further
and kvm will require even a tiny bit more work.
FYI, I'm working on a unification version of zcache that can support
all of these cleanly as well as better support for eviction
that will make standalone zcache more suitable for promotion from
staging and enterprise-ready. Due to various summer commitments,
it will probably be a few weeks before it is ready for posting.
Dan
On 06/21/2012 06:25 AM, Dan Magenheimer wrote:
>> From: Xiao Guangrong [mailto:[email protected]]
>> Subject: Re: [PATCH 02/10] zcache: fix refcount leak
>>
>> On 06/20/2012 10:54 AM, Xiao Guangrong wrote:
>>
>>> On 06/20/2012 04:06 AM, Seth Jennings wrote:
>>>
>>>> On 06/19/2012 02:49 PM, Dan Magenheimer wrote:
>>>>
>>>>> My preference would be to fix it the opposite way, by
>>>>> checking and ignoring zcache_host in zcache_put_pool.
>>>>> The ref-counting is to ensure that a client isn't
>>>>> accidentally destroyed while in use (for multiple-client
>>>>> users such as ramster and kvm) and since zcache_host is a static
>>>>> struct, it should never be deleted so need not be ref-counted.
>>>>
>>>>
>>>> If we do that, we'll need to comment it. If we don't, it won't be
>>>> obvious why we are refcounting every zcache client except one. It'll
>>>> look like a bug.
>>>
>>>
>>> Okay, i will fix it like Dan's way and comment it.
>>
>> Hmm...But i notice that zcache_host is the same as other clients, all
>> of them are static struct:
>>
>> | static struct zcache_client zcache_host;
>> | static struct zcache_client zcache_clients[MAX_CLIENTS];
>>
>> And all of them are not destroyed.
>
> Yes, the code currently in zcache was a first step towards
> supporting multiple clients. Ramster goes one step further
> and kvm will require even a tiny bit more work.
>
So, do you mind if we increase the refcount for all clients (zcache_host and
the other clients) first, like my original patch?
> FYI, I'm working on a unification version of zcache that can support
> all of these cleanly as well as better support for eviction
> that will make standalone zcache more suitable for promotion from
> staging and enterprise-ready. Due to various summer commitments,
> it will probably be a few weeks before it is ready for posting.
Great work, looking forward to the progress! :)
I just noticed you sent this patchset to Andrew, but the
staging tree is maintained by Greg. You're going to want to
send these patches to him.
Greg Kroah-Hartman <[email protected]>
--
Seth
On Thu, Jun 21, 2012 at 01:51:30PM -0500, Seth Jennings wrote:
> I just noticed you sent this patchset to Andrew, but the
> staging tree is maintained by Greg. You're going to want to
> send these patches to him.
>
> Greg Kroah-Hartman <[email protected]>
After this series is redone, right? As it is, this submission didn't
look ok, so I'm hoping a second round is forthcoming...
thanks,
greg k-h
On 06/22/2012 10:00 PM, Greg Kroah-Hartman wrote:
> On Thu, Jun 21, 2012 at 01:51:30PM -0500, Seth Jennings wrote:
>> I just noticed you sent this patchset to Andrew, but the
>> staging tree is maintained by Greg. You're going to want to
>> send these patches to him.
>>
>> Greg Kroah-Hartman <[email protected]>
>
> After this series is redone, right? As it is, this submission didn't
> look ok, so I'm hoping a second round is forthcoming...
Yes. That is the cleanest way since there are dependencies
among the patches. You could pull 04-08 and be ok, but you
might just prefer a repost.
--
Seth
On Mon, Jun 25, 2012 at 08:48:13AM -0500, Seth Jennings wrote:
> On 06/22/2012 10:00 PM, Greg Kroah-Hartman wrote:
> > On Thu, Jun 21, 2012 at 01:51:30PM -0500, Seth Jennings wrote:
> >> I just noticed you sent this patchset to Andrew, but the
> >> staging tree is maintained by Greg. You're going to want to
> >> send these patches to him.
> >>
> >> Greg Kroah-Hartman <[email protected]>
> >
> > After this series is redone, right? As it is, this submission didn't
> > look ok, so I'm hoping a second round is forthcoming...
>
> Yes. That is the cleanest way since there are dependencies
> among the patches. You could pull 04-08 and be ok, but you
> might just prefer a repost.
I do prefer a repost, thanks.
greg k-h
On 06/25/2012 10:11 PM, Greg Kroah-Hartman wrote:
> On Mon, Jun 25, 2012 at 08:48:13AM -0500, Seth Jennings wrote:
>> On 06/22/2012 10:00 PM, Greg Kroah-Hartman wrote:
>>> On Thu, Jun 21, 2012 at 01:51:30PM -0500, Seth Jennings wrote:
>>>> I just noticed you sent this patchset to Andrew, but the
>>>> staging tree is maintained by Greg. You're going to want to
>>>> send these patches to him.
>>>>
>>>> Greg Kroah-Hartman <[email protected]>
>>>
>>> After this series is redone, right? As it is, this submission didn't
>>> look ok, so I'm hoping a second round is forthcoming...
>>
>> Yes. That is the cleanest way since there are dependencies
>> among the patches. You could pull 04-08 and be ok, but you
>> might just prefer a repost.
>
> I do prefer a repost, thanks.
Sorry for the delayed reply; I was on vacation. I will
post v2 soon. Thank you all!