Recently we ran into an oom issue where the kernel panicked because there was no killable process.
The dmesg showed huge unreclaimable slabs using almost 100% of memory, but kdump failed to capture a vmcore for some reason.
So it seems better to capture unreclaimable slab info in the oom message when the kernel panics, to aid troubleshooting and cover this corner case.
Since the kernel is already panicking, capturing more information is worthwhile and doesn't disturb the normal oom killer.
With this patchset, tools/vm/slabinfo gains a new option, "-U", to show unreclaimable slabs only.
And the oom killer message will print all unreclaimable slabs with non-zero usage (num_objs * size != 0).
For details, please see the commit log for each commit.
Changelog v2 -> v3:
* Show used size and total size of each kmem cache per David's comment
Changelog v1 -> v2:
* Removed the original patch 1 ("mm: slab: output reclaimable flag in /proc/slabinfo") since Christoph suggested it might break compatibility and /proc/slabinfo is legacy
* Added Christoph's Acked-by
* Removed acquiring slab_mutex per Tetsuo's comment
Yang Shi (2):
tools: slabinfo: add "-U" option to show unreclaimable slabs only
mm: oom: show unreclaimable slab info when kernel panic
 mm/oom_kill.c       | 13 +++++++++++--
 mm/slab.c           |  1 +
 mm/slab.h           |  7 +++++++
 mm/slab_common.c    | 31 +++++++++++++++++++++++++++++++
 mm/slub.c           |  1 +
 tools/vm/slabinfo.c | 11 ++++++++++-
 6 files changed, 61 insertions(+), 3 deletions(-)
Add "-U" option to show unreclaimable slabs only.
"-U" and "-S" together can tell us what unreclaimable slabs use the most
memory to help debug huge unreclaimable slabs issue.
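For example, the following invocation lists only the unreclaimable caches,
sorted by size:

	tools/vm/slabinfo -U -S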
Signed-off-by: Yang Shi <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
---
tools/vm/slabinfo.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b9d34b3..9673190 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -83,6 +83,7 @@ struct aliasinfo {
int sort_loss;
int extended_totals;
int show_bytes;
+int unreclaim_only;
/* Debug options */
int sanity;
@@ -132,6 +133,7 @@ static void usage(void)
"-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
"-B|--Bytes Show size in bytes\n"
+ "-U|--unreclaim Show unreclaimable slabs only\n"
"\nValid debug options (FZPUT may be combined)\n"
"a / A Switch on all debug options (=FZUP)\n"
"- Switch off all debug options\n"
@@ -568,6 +570,9 @@ static void slabcache(struct slabinfo *s)
if (strcmp(s->name, "*") == 0)
return;
+ if (unreclaim_only && s->reclaim_account)
+ return;
+
if (actual_slabs == 1) {
report(s);
return;
@@ -1346,6 +1351,7 @@ struct option opts[] = {
{ "Loss", no_argument, NULL, 'L'},
{ "Xtotals", no_argument, NULL, 'X'},
{ "Bytes", no_argument, NULL, 'B'},
+ { "unreclaim", no_argument, NULL, 'U'},
{ NULL, 0, NULL, 0 }
};
@@ -1357,7 +1363,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
- while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXB",
+ while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXBU",
opts, NULL)) != -1)
switch (c) {
case '1':
@@ -1438,6 +1444,9 @@ int main(int argc, char *argv[])
case 'B':
show_bytes = 1;
break;
+ case 'U':
+ unreclaim_only = 1;
+ break;
default:
fatal("%s: Invalid option '%c'\n", argv[0], optopt);
--
1.8.3.1
The kernel may panic when an oom happens without a killable process.
Sometimes this is caused by huge unreclaimable slabs used by the kernel.
Although kdump could help debug such a problem, it is not available on
all architectures and it might malfunction sometimes.
And, since the kernel is already panicking, it is worth capturing such
information in dmesg to aid troubleshooting.
Add a field in struct slabinfo to show whether a slab is reclaimable or
not, and a helper function to derive the value from the
SLAB_RECLAIM_ACCOUNT flag.
Print out unreclaimable slab info (used size and total size) for slabs
whose actual memory usage is non-zero (num_objs * size != 0) when
panic_on_oom is set or there is no killable process. Since such
information is only shown when the kernel panics, it will not make the
normal oom output too verbose.
The output looks like:
Unreclaimable slab info:
Name                 Used         Total
rpc_buffers          31KB         31KB
rpc_tasks            7KB          7KB
ebitmap_node         1964KB       1964KB
avtab_node           5024KB       5024KB
xfs_buf              1402KB       1402KB
xfs_ili              134KB        134KB
xfs_efi_item         115KB        115KB
xfs_efd_item         115KB        115KB
xfs_buf_item         134KB        134KB
xfs_log_item_desc    342KB        342KB
xfs_trans            1412KB       1412KB
xfs_ifork            212KB        212KB
Signed-off-by: Yang Shi <[email protected]>
---
 mm/oom_kill.c    | 13 +++++++++++--
 mm/slab.c        |  1 +
 mm/slab.h        |  7 +++++++
 mm/slab_common.c | 31 +++++++++++++++++++++++++++++++
 mm/slub.c        |  1 +
 5 files changed, 51 insertions(+), 2 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 99736e0..173c423 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -43,6 +43,7 @@
#include <asm/tlb.h>
#include "internal.h"
+#include "slab.h"
#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
@@ -427,6 +428,14 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
dump_tasks(oc->memcg, oc->nodemask);
}
+static void dump_header_with_slabinfo(struct oom_control *oc, struct task_struct *p)
+{
+ dump_header(oc, p);
+
+ if (IS_ENABLED(CONFIG_SLABINFO))
+ show_unreclaimable_slab();
+}
+
/*
* Number of OOM victims in flight
*/
@@ -959,7 +968,7 @@ static void check_panic_on_oom(struct oom_control *oc,
/* Do not panic for oom kills triggered by sysrq */
if (is_sysrq_oom(oc))
return;
- dump_header(oc, NULL);
+ dump_header_with_slabinfo(oc, NULL);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
@@ -1043,7 +1052,7 @@ bool out_of_memory(struct oom_control *oc)
select_bad_process(oc);
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
- dump_header(oc, NULL);
+ dump_header_with_slabinfo(oc, NULL);
panic("Out of memory and no killable processes...\n");
}
if (oc->chosen && oc->chosen != (void *)-1UL) {
diff --git a/mm/slab.c b/mm/slab.c
index 04dec48..4f4971c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4132,6 +4132,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
sinfo->shared = cachep->shared;
sinfo->objects_per_slab = cachep->num;
sinfo->cache_order = cachep->gfporder;
+ sinfo->reclaim = is_reclaimable(cachep);
}
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index 0733628..2f1ebce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -186,6 +186,7 @@ struct slabinfo {
unsigned int shared;
unsigned int objects_per_slab;
unsigned int cache_order;
+ unsigned int reclaim;
};
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
@@ -352,6 +353,11 @@ static inline void memcg_link_cache(struct kmem_cache *s)
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+static inline bool is_reclaimable(struct kmem_cache *s)
+{
+ return (s->flags & SLAB_RECLAIM_ACCOUNT) ? true : false;
+}
+
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
struct kmem_cache *cachep;
@@ -504,6 +510,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);
+void show_unreclaimable_slab(void);
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 904a83b..f2c6200 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -35,6 +35,8 @@
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
slab_caches_to_rcu_destroy_workfn);
+#define K(x) ((x)/1024)
+
/*
* Set of flags that will prevent slab merging
*/
@@ -1272,6 +1274,35 @@ static int slab_show(struct seq_file *m, void *p)
return 0;
}
+void show_unreclaimable_slab()
+{
+ struct kmem_cache *s = NULL;
+ struct slabinfo sinfo;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ printk("Unreclaimable slab info:\n");
+ printk("Name Used Total\n");
+
+ /*
+ * Acquiring slab_mutex here is unnecessary: we don't want to risk
+ * sleeping in the oom path right before a kernel panic. And since it
+ * is already oom, there should not be any big allocation that could
+ * change the statistics significantly.
+ */
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!is_root_cache(s))
+ continue;
+
+ get_slabinfo(s, &sinfo);
+
+ if (!is_reclaimable(s) && sinfo.num_objs > 0)
+ printk("%-17s %10luKB %10luKB\n", cache_name(s), K(sinfo.active_objs * s->size), K(sinfo.num_objs * s->size));
+ }
+}
+EXPORT_SYMBOL(show_unreclaimable_slab);
+#undef K
+
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
diff --git a/mm/slub.c b/mm/slub.c
index 163352c..5c17c0a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5872,6 +5872,7 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
sinfo->num_slabs = nr_slabs;
sinfo->objects_per_slab = oo_objects(s->oo);
sinfo->cache_order = oo_order(s->oo);
+ sinfo->reclaim = is_reclaimable(s);
}
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
--
1.8.3.1
On Thu, 21 Sep 2017, Yang Shi wrote:
> diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
> index b9d34b3..9673190 100644
> --- a/tools/vm/slabinfo.c
> +++ b/tools/vm/slabinfo.c
> @@ -83,6 +83,7 @@ struct aliasinfo {
> int sort_loss;
> int extended_totals;
> int show_bytes;
> +int unreclaim_only;
>
> /* Debug options */
> int sanity;
> @@ -132,6 +133,7 @@ static void usage(void)
> "-L|--Loss Sort by loss\n"
> "-X|--Xtotals Show extended summary information\n"
> "-B|--Bytes Show size in bytes\n"
> + "-U|--unreclaim Show unreclaimable slabs only\n"
> "\nValid debug options (FZPUT may be combined)\n"
> "a / A Switch on all debug options (=FZUP)\n"
> "- Switch off all debug options\n"
I suppose this should be s/unreclaim/Unreclaim/
> @@ -568,6 +570,9 @@ static void slabcache(struct slabinfo *s)
> if (strcmp(s->name, "*") == 0)
> return;
>
> + if (unreclaim_only && s->reclaim_account)
> + return;
> +
> if (actual_slabs == 1) {
> report(s);
> return;
> @@ -1346,6 +1351,7 @@ struct option opts[] = {
> { "Loss", no_argument, NULL, 'L'},
> { "Xtotals", no_argument, NULL, 'X'},
> { "Bytes", no_argument, NULL, 'B'},
> + { "unreclaim", no_argument, NULL, 'U'},
> { NULL, 0, NULL, 0 }
> };
>
Same.
After that:
Acked-by: David Rientjes <[email protected]>
Also, you may find it better to remove the "RFC" tag from the patchset's
header email since it's agreed that we want this.
On Thu, 21 Sep 2017, Yang Shi wrote:
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 99736e0..173c423 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -43,6 +43,7 @@
>
> #include <asm/tlb.h>
> #include "internal.h"
> +#include "slab.h"
>
> #define CREATE_TRACE_POINTS
> #include <trace/events/oom.h>
> @@ -427,6 +428,14 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
> dump_tasks(oc->memcg, oc->nodemask);
> }
>
> +static void dump_header_with_slabinfo(struct oom_control *oc, struct task_struct *p)
> +{
> + dump_header(oc, p);
> +
> + if (IS_ENABLED(CONFIG_SLABINFO))
> + show_unreclaimable_slab();
> +}
> +
> /*
> * Number of OOM victims in flight
> */
I don't think we need a new function for this. Where you want to dump
unreclaimable slab before panic, just call a new dump_unreclaimable_slab()
function that gets declared in slab.h that is a no-op when CONFIG_SLABINFO
is disabled. We just want to do
dump_header(...);
dump_unreclaimable_slab(...);
panic(...);
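A declaration in mm/slab.h along these lines would do it (untested
sketch):

	#ifdef CONFIG_SLABINFO
	void dump_unreclaimable_slab(void);
	#else
	static inline void dump_unreclaimable_slab(void)
	{
	}
	#endif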
> diff --git a/mm/slab.c b/mm/slab.c
> index 04dec48..4f4971c 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -4132,6 +4132,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
> sinfo->shared = cachep->shared;
> sinfo->objects_per_slab = cachep->num;
> sinfo->cache_order = cachep->gfporder;
> + sinfo->reclaim = is_reclaimable(cachep);
We don't need a new field, we already have cachep->flags accessible.
> }
>
> void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
> diff --git a/mm/slab.h b/mm/slab.h
> index 0733628..2f1ebce 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -186,6 +186,7 @@ struct slabinfo {
> unsigned int shared;
> unsigned int objects_per_slab;
> unsigned int cache_order;
> + unsigned int reclaim;
Not needed.
> };
>
> void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
> @@ -352,6 +353,11 @@ static inline void memcg_link_cache(struct kmem_cache *s)
>
> #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
>
> +static inline bool is_reclaimable(struct kmem_cache *s)
> +{
> + return (s->flags & SLAB_RECLAIM_ACCOUNT) ? true : false;
> +}
> +
I don't think we need this.
> static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
> {
> struct kmem_cache *cachep;
> @@ -504,6 +510,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
> void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
> void memcg_slab_stop(struct seq_file *m, void *p);
> int memcg_slab_show(struct seq_file *m, void *p);
> +void show_unreclaimable_slab(void);
>
> void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 904a83b..f2c6200 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -35,6 +35,8 @@
> static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
> slab_caches_to_rcu_destroy_workfn);
>
> +#define K(x) ((x)/1024)
> +
I don't think we need this.
> /*
> * Set of flags that will prevent slab merging
> */
> @@ -1272,6 +1274,35 @@ static int slab_show(struct seq_file *m, void *p)
> return 0;
> }
>
> +void show_unreclaimable_slab()
void show_unreclaimable_slab(void)
> +{
> + struct kmem_cache *s = NULL;
No initialization needed.
> + struct slabinfo sinfo;
> +
> + memset(&sinfo, 0, sizeof(sinfo));
> +
> + printk("Unreclaimable slab info:\n");
> + printk("Name Used Total\n");
> +
> + /*
> + * Here acquiring slab_mutex is unnecessary since we don't prefer to
> + * get sleep in oom path right before kernel panic, and avoid race condition.
> + * Since it is already oom, so there should be not any big allocation
> + * which could change the statistics significantly.
> + */
> + list_for_each_entry(s, &slab_caches, list) {
> + if (!is_root_cache(s))
> + continue;
> +
We need to do the memset() here.
> + get_slabinfo(s, &sinfo);
> +
> + if (!is_reclaimable(s) && sinfo.num_objs > 0)
> + printk("%-17s %10luKB %10luKB\n", cache_name(s), K(sinfo.active_objs * s->size), K(sinfo.num_objs * s->size));
I think you can just check for SLAB_RECLAIM_ACCOUNT here.
Everything in this function should be pr_info().
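Untested, but with those changes the loop would look something like:

	list_for_each_entry(s, &slab_caches, list) {
		/* skip memcg children and reclaimable caches */
		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
			continue;

		/* clear stale stats from the previous iteration */
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
				K(sinfo.active_objs * s->size),
				K(sinfo.num_objs * s->size));
	}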
> + }
> +}
> +EXPORT_SYMBOL(show_unreclaimable_slab);
> +#undef K
> +
> #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
> void *memcg_slab_start(struct seq_file *m, loff_t *pos)
> {
> diff --git a/mm/slub.c b/mm/slub.c
> index 163352c..5c17c0a 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -5872,6 +5872,7 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
> sinfo->num_slabs = nr_slabs;
> sinfo->objects_per_slab = oo_objects(s->oo);
> sinfo->cache_order = oo_order(s->oo);
> + sinfo->reclaim = is_reclaimable(s);
Not needed.
> }
>
> void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
On 9/20/17 1:45 PM, David Rientjes wrote:
> On Thu, 21 Sep 2017, Yang Shi wrote:
>
>> diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
>> index b9d34b3..9673190 100644
>> --- a/tools/vm/slabinfo.c
>> +++ b/tools/vm/slabinfo.c
>> @@ -83,6 +83,7 @@ struct aliasinfo {
>> int sort_loss;
>> int extended_totals;
>> int show_bytes;
>> +int unreclaim_only;
>>
>> /* Debug options */
>> int sanity;
>> @@ -132,6 +133,7 @@ static void usage(void)
>> "-L|--Loss Sort by loss\n"
>> "-X|--Xtotals Show extended summary information\n"
>> "-B|--Bytes Show size in bytes\n"
>> + "-U|--unreclaim Show unreclaimable slabs only\n"
>> "\nValid debug options (FZPUT may be combined)\n"
>> "a / A Switch on all debug options (=FZUP)\n"
>> "- Switch off all debug options\n"
>
> I suppose this should be s/unreclaim/Unreclaim/
>
>> @@ -568,6 +570,9 @@ static void slabcache(struct slabinfo *s)
>> if (strcmp(s->name, "*") == 0)
>> return;
>>
>> + if (unreclaim_only && s->reclaim_account)
>> + return;
>> +
>> if (actual_slabs == 1) {
>> report(s);
>> return;
>> @@ -1346,6 +1351,7 @@ struct option opts[] = {
>> { "Loss", no_argument, NULL, 'L'},
>> { "Xtotals", no_argument, NULL, 'X'},
>> { "Bytes", no_argument, NULL, 'B'},
>> + { "unreclaim", no_argument, NULL, 'U'},
>> { NULL, 0, NULL, 0 }
>> };
>>
>
> Same.
>
> After that:
>
> Acked-by: David Rientjes <[email protected]>
>
> Also, you may find it better to remove the "RFC" tag from the patchset's
> header email since it's agreed that we want this.
Thanks, these will be fixed in v4.
Yang
>
On 9/20/17 2:00 PM, David Rientjes wrote:
> On Thu, 21 Sep 2017, Yang Shi wrote:
>
>> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
>> index 99736e0..173c423 100644
>> --- a/mm/oom_kill.c
>> +++ b/mm/oom_kill.c
>> @@ -43,6 +43,7 @@
>>
>> #include <asm/tlb.h>
>> #include "internal.h"
>> +#include "slab.h"
>>
>> #define CREATE_TRACE_POINTS
>> #include <trace/events/oom.h>
>> @@ -427,6 +428,14 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
>> dump_tasks(oc->memcg, oc->nodemask);
>> }
>>
>> +static void dump_header_with_slabinfo(struct oom_control *oc, struct task_struct *p)
>> +{
>> + dump_header(oc, p);
>> +
>> + if (IS_ENABLED(CONFIG_SLABINFO))
>> + show_unreclaimable_slab();
>> +}
>> +
>> /*
>> * Number of OOM victims in flight
>> */
>
> I don't think we need a new function for this. Where you want to dump
> unreclaimable slab before panic, just call a new dump_unreclaimable_slab()
> function that gets declared in slab.h that is a no-op when CONFIG_SLABINFO
> is disabled. We just want to do
>
> dump_header(...);
> dump_unreclaimable_slab(...);
> panic(...);
Thanks for the comments, they will be addressed in v4.
Yang
>
>> diff --git a/mm/slab.c b/mm/slab.c
>> index 04dec48..4f4971c 100644
>> --- a/mm/slab.c
>> +++ b/mm/slab.c
>> @@ -4132,6 +4132,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
>> sinfo->shared = cachep->shared;
>> sinfo->objects_per_slab = cachep->num;
>> sinfo->cache_order = cachep->gfporder;
>> + sinfo->reclaim = is_reclaimable(cachep);
>
> We don't need a new field, we already have cachep->flags accessible.
>
>> }
>>
>> void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
>> diff --git a/mm/slab.h b/mm/slab.h
>> index 0733628..2f1ebce 100644
>> --- a/mm/slab.h
>> +++ b/mm/slab.h
>> @@ -186,6 +186,7 @@ struct slabinfo {
>> unsigned int shared;
>> unsigned int objects_per_slab;
>> unsigned int cache_order;
>> + unsigned int reclaim;
>
> Not needed.
>
>> };
>>
>> void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
>> @@ -352,6 +353,11 @@ static inline void memcg_link_cache(struct kmem_cache *s)
>>
>> #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
>>
>> +static inline bool is_reclaimable(struct kmem_cache *s)
>> +{
>> + return (s->flags & SLAB_RECLAIM_ACCOUNT) ? true : false;
>> +}
>> +
>
> I don't think we need this.
>
>> static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
>> {
>> struct kmem_cache *cachep;
>> @@ -504,6 +510,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
>> void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
>> void memcg_slab_stop(struct seq_file *m, void *p);
>> int memcg_slab_show(struct seq_file *m, void *p);
>> +void show_unreclaimable_slab(void);
>>
>> void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
>>
>> diff --git a/mm/slab_common.c b/mm/slab_common.c
>> index 904a83b..f2c6200 100644
>> --- a/mm/slab_common.c
>> +++ b/mm/slab_common.c
>> @@ -35,6 +35,8 @@
>> static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
>> slab_caches_to_rcu_destroy_workfn);
>>
>> +#define K(x) ((x)/1024)
>> +
>
> I don't think we need this.
>
>> /*
>> * Set of flags that will prevent slab merging
>> */
>> @@ -1272,6 +1274,35 @@ static int slab_show(struct seq_file *m, void *p)
>> return 0;
>> }
>>
>> +void show_unreclaimable_slab()
>
> void show_unreclaimable_slab(void)
>
>> +{
>> + struct kmem_cache *s = NULL;
>
> No initialization needed.
>
>> + struct slabinfo sinfo;
>> +
>> + memset(&sinfo, 0, sizeof(sinfo));
>> +
>> + printk("Unreclaimable slab info:\n");
>> + printk("Name Used Total\n");
>> +
>> + /*
>> + * Here acquiring slab_mutex is unnecessary since we don't prefer to
>> + * get sleep in oom path right before kernel panic, and avoid race condition.
>> + * Since it is already oom, so there should be not any big allocation
>> + * which could change the statistics significantly.
>> + */
>> + list_for_each_entry(s, &slab_caches, list) {
>> + if (!is_root_cache(s))
>> + continue;
>> +
>
> We need to do the memset() here.
>
>> + get_slabinfo(s, &sinfo);
>> +
>> + if (!is_reclaimable(s) && sinfo.num_objs > 0)
>> + printk("%-17s %10luKB %10luKB\n", cache_name(s), K(sinfo.active_objs * s->size), K(sinfo.num_objs * s->size));
>
> I think you can just check for SLAB_RECLAIM_ACCOUNT here.
>
> Everything in this function should be pr_info().
>
>> + }
>> +}
>> +EXPORT_SYMBOL(show_unreclaimable_slab);
>> +#undef K
>> +
>> #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
>> void *memcg_slab_start(struct seq_file *m, loff_t *pos)
>> {
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 163352c..5c17c0a 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -5872,6 +5872,7 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
>> sinfo->num_slabs = nr_slabs;
>> sinfo->objects_per_slab = oo_objects(s->oo);
>> sinfo->cache_order = oo_order(s->oo);
>> + sinfo->reclaim = is_reclaimable(s);
>
> Not needed.
>
>> }
>>
>> void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)