2013-07-04 00:33:38

by Wanpeng Li

[permalink] [raw]
Subject: [PATCH v3 1/5] mm/slab: Fix drain freelist excessively

Changelog:
v1 -> v2:
* Fix the callers that pass # of objects. Make sure they pass # of slabs.
v2 -> v3:
* introduce helper function slabs_tofree

The drain_freelist is called to drain slabs_free lists for cache reap,
cache shrink, memory hotplug callback etc. The tofree parameter should
be the number of slabs to free instead of the number of slab objects to
free.

This patch fixes the callers that pass # of objects. Make sure they pass #
of slabs.

Signed-off-by: Wanpeng Li <[email protected]>
---
mm/slab.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index e6122b2..3002771 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
return 0;
}

+static inline int slabs_tofree(struct kmem_cache *cachep,
+ struct kmem_cache_node *n)
+{
+ return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
static void cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ free_array_cache:
n = cachep->node[node];
if (!n)
continue;
- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));
}
}

@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
if (!n)
continue;

- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));

if (!list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
if (!n)
continue;

- drain_freelist(cachep, n, n->free_objects);
+ drain_freelist(cachep, n, slabs_tofree(cachep, n));

ret += !list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial);
--
1.8.1.2


2013-07-04 00:33:46

by Wanpeng Li

[permalink] [raw]
Subject: [PATCH v3 2/5] mm/slab: Sharing s_next and s_stop between slab and slub

This patch shares s_next and s_stop between slab and slub.

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
mm/slab.c | 10 ----------
mm/slab.h | 3 +++
mm/slab_common.c | 4 ++--
3 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 3002771..59c78b1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4436,16 +4436,6 @@ static int leaks_show(struct seq_file *m, void *p)
return 0;
}

-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
- return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
- mutex_unlock(&slab_mutex);
-}
-
static const struct seq_operations slabstats_op = {
.start = leaks_start,
.next = s_next,
diff --git a/mm/slab.h b/mm/slab.h
index f96b49e..95c8860 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -271,3 +271,6 @@ struct kmem_cache_node {
#endif

};
+
+void *s_next(struct seq_file *m, void *p, loff_t *pos);
+void s_stop(struct seq_file *m, void *p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2d41450..d161b81 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -531,12 +531,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return seq_list_start(&slab_caches, *pos);
}

-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}

-static void s_stop(struct seq_file *m, void *p)
+void s_stop(struct seq_file *m, void *p)
{
mutex_unlock(&slab_mutex);
}
--
1.8.1.2

2013-07-04 00:33:59

by Wanpeng Li

[permalink] [raw]
Subject: [PATCH v3 5/5] mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo

Use the existing interfaces node_nr_slabs and node_nr_objs to get
nr_slabs and nr_objs.

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
mm/slub.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 84b84f4..d9135a8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5280,8 +5280,8 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
if (!n)
continue;

- nr_slabs += atomic_long_read(&n->nr_slabs);
- nr_objs += atomic_long_read(&n->total_objects);
+ nr_slabs += node_nr_slabs(n);
+ nr_objs += node_nr_objs(n);
nr_free += count_partial(n, count_free);
}

--
1.8.1.2

2013-07-04 00:33:44

by Wanpeng Li

[permalink] [raw]
Subject: [PATCH v3 3/5] mm/slab: Fix /proc/slabinfo unwriteable for slab

Slab has some tunables such as limit, batchcount, and sharedfactor that can
be tuned through the function slabinfo_write. Commit b7454ad3 ("mm/sl[au]b:
Move slabinfo processing to slab_common.c") incorrectly made /proc/slabinfo
unwriteable for slab; this patch fixes it by reverting to the original mode.

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
mm/slab_common.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index d161b81..6a2e530 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -497,6 +497,13 @@ void __init create_kmalloc_caches(unsigned long flags)


#ifdef CONFIG_SLABINFO
+
+#ifdef CONFIG_SLAB
+#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
+#else
+#define SLABINFO_RIGHTS S_IRUSR
+#endif
+
void print_slabinfo_header(struct seq_file *m)
{
/*
@@ -633,7 +640,8 @@ static const struct file_operations proc_slabinfo_operations = {

static int __init slab_proc_init(void)
{
- proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+ proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
+ &proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
--
1.8.1.2

2013-07-04 00:34:32

by Wanpeng Li

[permalink] [raw]
Subject: [PATCH v3 4/5] mm/slub: Drop unnecessary nr_partials

This patch removes the unused nr_partials variable.

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
mm/slub.c | 2 --
1 file changed, 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4649ff0..84b84f4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5269,7 +5269,6 @@ __initcall(slab_sysfs_init);
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
- unsigned long nr_partials = 0;
unsigned long nr_slabs = 0;
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
@@ -5281,7 +5280,6 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
if (!n)
continue;

- nr_partials += n->nr_partial;
nr_slabs += atomic_long_read(&n->nr_slabs);
nr_objs += atomic_long_read(&n->total_objects);
nr_free += count_partial(n, count_free);
--
1.8.1.2

Subject: Re: [PATCH v3 1/5] mm/slab: Fix drain freelist excessively

On Thu, 4 Jul 2013, Wanpeng Li wrote:

> This patch fix the callers that pass # of objects. Make sure they pass #
> of slabs.

Acked-by: Christoph Lameter <[email protected]>

2013-07-07 15:38:50

by Pekka Enberg

[permalink] [raw]
Subject: Re: [PATCH v3 1/5] mm/slab: Fix drain freelist excessively

On 7/7/13 12:24 PM, Wanpeng Li wrote:
> On Fri, Jul 05, 2013 at 01:37:28PM +0000, Christoph Lameter wrote:
>> On Thu, 4 Jul 2013, Wanpeng Li wrote:
>>
>>> This patch fix the callers that pass # of objects. Make sure they pass #
>>> of slabs.
>>
>> Acked-by: Christoph Lameter <[email protected]>
>
> Hi Pekka,
>
> Is it ok for you to pick this patchset? ;-)

Applied, thanks a lot!