2021-05-21 12:52:20

by Oliver Glitta

Subject: [RFC 1/3] mm/slub: aggregate objects in cache by stack trace

From: Oliver Glitta <[email protected]>

Aggregate objects in the slub cache by stack trace, in addition to the
caller address, in the alloc_calls and free_calls implementation in
debugfs. Add the stack trace to the output.

Add an all_objects implementation to debugfs to print information
about all objects.

Signed-off-by: Oliver Glitta <[email protected]>
---
Based on next-20210518 and
https://lore.kernel.org/r/[email protected]/

mm/slub.c | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e9b84eddc50d..d5ed6ed7d68b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4770,6 +4770,7 @@ EXPORT_SYMBOL(validate_slab_cache);
*/

struct location {
+ depot_stack_handle_t handle;
unsigned long count;
unsigned long addr;
long long sum_time;
@@ -4822,9 +4823,15 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
{
long start, end, pos;
struct location *l;
- unsigned long caddr;
+ unsigned long caddr, chandle;
unsigned long age = jiffies - track->when;
+ depot_stack_handle_t handle;

+#ifdef CONFIG_STACKDEPOT
+ handle = READ_ONCE(track->handle);
+#else
+ handle = 0;
+#endif
start = -1;
end = t->count;

@@ -4839,7 +4846,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
break;

caddr = t->loc[pos].addr;
- if (track->addr == caddr) {
+ chandle = t->loc[pos].handle;
+ if ((track->addr == caddr) && (handle == chandle)) {

l = &t->loc[pos];
l->count++;
@@ -4864,6 +4872,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,

if (track->addr < caddr)
end = pos;
+ else if (track->addr == caddr && handle < chandle)
+ end = pos;
else
start = pos;
}
@@ -4886,6 +4896,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
l->max_time = age;
l->min_pid = track->pid;
l->max_pid = track->pid;
+ l->handle = handle;
cpumask_clear(to_cpumask(l->cpus));
cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
nodes_clear(l->nodes);
@@ -5837,6 +5848,21 @@ static int slab_debugfs_show(struct seq_file *seq, void *v)
seq_printf(seq, " nodes=%*pbl",
nodemask_pr_args(&l->nodes));

+#ifdef CONFIG_STACKDEPOT
+ {
+ depot_stack_handle_t handle;
+ unsigned long *entries;
+ unsigned int nr_entries, j;
+
+ handle = READ_ONCE(l->handle);
+ if (handle) {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ seq_puts(seq, "\n");
+ for (j = 0; j < nr_entries; j++)
+ seq_printf(seq, "\t%pS\n", (void *)entries[j]);
+ }
+ }
+#endif
seq_puts(seq, "\n");
}

--
2.31.1.272.g89b43f80a5


2021-05-21 20:15:23

by Oliver Glitta

Subject: [RFC 3/3] mm/slub: add all_objects implementation in debugfs

From: Oliver Glitta <[email protected]>

Add an all_objects implementation to debugfs to print information
about all objects in the slub cache.

Signed-off-by: Oliver Glitta <[email protected]>
---
mm/slub.c | 225 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 225 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index 247983d647cd..885d0b074e31 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4789,6 +4789,17 @@ struct loc_track {
struct location *loc;
};

+enum slub_list_field { PARTIAL_LIST, FULL_LIST };
+
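+/*
+ * Per-open iterator state for the all_objects file: the node currently
+ * being walked, which of its lists (partial or full) we are on, how many
+ * slabs remain on that list, and a bitmap marking the free objects of
+ * the slab page being printed.
+ */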
+struct slab_debug_private {
+ struct inode *inode;
+ struct kmem_cache_node *node;
+ unsigned long nid;
+ long slabs_remaining;
+ enum slub_list_field field;
+ unsigned long *map;
+};
+
static struct dentry *slab_debugfs_root;
struct loc_track t = { 0, 0, NULL };

@@ -5809,6 +5820,216 @@ static int debugfs_slab_alias(struct kmem_cache *s, const char *name)
return 0;
}

+static struct kmem_cache_node *find_node(struct kmem_cache *s, unsigned long *nid)
+{
+ struct kmem_cache_node *node = NULL;
+
+ while (*nid < nr_node_ids) {
+ node = s->node[*nid];
+ ++*nid;
+ if (!node || !atomic_long_read(&node->nr_slabs))
+ node = NULL;
+ else
+ break;
+ }
+ return node;
+}
+
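+/*
+ * Advance to the next node that has slabs and decide which list to walk
+ * first: the partial list if it is non-empty, otherwise the full list.
+ * Returns false once all nodes have been visited.
+ */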
+static bool next_page_new_node(struct slab_debug_private *priv)
+{
+ struct kmem_cache_node *node;
+ struct kmem_cache *s = priv->inode->i_private;
+
+ node = find_node(s, &priv->nid);
+
+ if (!node)
+ return false;
+
+ priv->node = node;
+
+ if (node->nr_partial > 0) {
+ priv->field = PARTIAL_LIST;
+ priv->slabs_remaining = node->nr_partial;
+ } else if (atomic_long_read(&node->nr_slabs) > 0) {
+ priv->field = FULL_LIST;
+ priv->slabs_remaining = atomic_long_read(&node->nr_slabs);
+ }
+
+ return priv->slabs_remaining;
+}
+
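+/*
+ * Return the next slab page to show. A page is taken from the front of
+ * the current list under list_lock, pinned with get_page(), and the
+ * list is rotated so that successive calls see different pages. The
+ * free objects of the page are recorded in priv->map while the slab is
+ * locked. When the partial list is exhausted we switch to the full
+ * list, and then to the next node.
+ */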
+static struct page *next_page(struct slab_debug_private *priv)
+{
+ struct page *page = NULL;
+ struct kmem_cache *s = priv->inode->i_private;
+ struct kmem_cache_node *node;
+ unsigned long flags;
+
+redo:
+ node = priv->node;
+ if (priv->slabs_remaining > 0) {
+ struct list_head *head;
+ void *p, *addr;
+
+ --priv->slabs_remaining;
+
+ if (priv->field == PARTIAL_LIST)
+ head = &node->partial;
+ else
+ head = &node->full;
+
+ spin_lock_irqsave(&node->list_lock, flags);
+ page = list_first_entry(head, struct page, slab_list);
+ if (page) {
+ get_page(page);
+ slab_lock(page);
+ addr = page_address(page);
+ bitmap_zero(priv->map, page->objects);
+
+ for (p = page->freelist; p; p = get_freepointer(s, p))
+ set_bit(__obj_to_index(s, addr, p), priv->map);
+ slab_unlock(page);
+ }
+ list_rotate_left(head);
+ spin_unlock_irqrestore(&node->list_lock, flags);
+
+ } else if ((priv->field == PARTIAL_LIST)
+ && (atomic_long_read(&node->nr_slabs) != node->nr_partial)) {
+
+ priv->field = FULL_LIST;
+ priv->slabs_remaining = atomic_long_read(&node->nr_slabs) - node->nr_partial;
+
+ goto redo;
+ } else {
+ if (next_page_new_node(priv))
+ goto redo;
+ }
+
+ return page;
+}
+
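+/*
+ * Print one line per object in the slab page: its address, whether it
+ * is allocated or free, and the most recent alloc and free tracks
+ * including their stack traces when stack depot is enabled.
+ */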
+static int debugfs_all_objects_show(struct seq_file *seq, void *v)
+{
+ struct slab_debug_private *priv = seq->private;
+ struct kmem_cache *s = priv->inode->i_private;
+ struct page *page = v;
+ void *addr = page_address(page);
+ void *p;
+ unsigned long *map = priv->map;
+ struct track *track;
+ depot_stack_handle_t handle;
+ unsigned long *entries;
+ unsigned int nr_entries, j;
+
+ for_each_object(p, s, addr, page->objects) {
+ seq_printf(seq, "Object: %pK ", p);
+ if (!test_bit(__obj_to_index(s, addr, p), map))
+ seq_puts(seq, "allocated\n");
+ else
+ seq_puts(seq, "free\n");
+
+ track = get_track(s, p, TRACK_ALLOC);
+ seq_printf(seq, "Last allocated: %pS age=%ld pid=%d cpu=%u\n",
+ (void *)track->addr, jiffies - track->when, track->pid, track->cpu);
+
+#ifdef CONFIG_STACKDEPOT
+ handle = READ_ONCE(track->handle);
+ if (handle) {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ for (j = 0; j < nr_entries; j++)
+ seq_printf(seq, "\t%pS\n", (void *)entries[j]);
+ }
+#endif
+
+ track = get_track(s, p, TRACK_FREE);
+ seq_printf(seq, "Last free: %pS age=%ld pid=%d cpu=%u\n",
+ (void *)track->addr, jiffies - track->when, track->pid, track->cpu);
+
+#ifdef CONFIG_STACKDEPOT
+ handle = READ_ONCE(track->handle);
+ if (handle) {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ for (j = 0; j < nr_entries; j++)
+ seq_printf(seq, "\t%pS\n", (void *)entries[j]);
+ }
+#endif
+ seq_puts(seq, "\n");
+ }
+ return 0;
+}
+
+static void *debugfs_all_objects_start(struct seq_file *m, loff_t *ppos)
+{
+ struct slab_debug_private *priv = m->private;
+ struct kmem_cache *s = priv->inode->i_private;
+ struct page *page;
+
+ priv->map = kmalloc(BITS_TO_LONGS(MAX_OBJS_PER_PAGE), GFP_KERNEL);
+
+ if (!priv->map)
+ return NULL;
+
+ if (!(s->flags & SLAB_STORE_USER))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ page = next_page(priv);
+ return page;
+}
+
+static void *debugfs_all_objects_next(struct seq_file *m, void *v, loff_t *ppos)
+{
+ struct slab_debug_private *priv = m->private;
+ struct page *page;
+
+ if (v)
+ put_page(v);
+
+ ++*ppos;
+ page = next_page(priv);
+
+ return page;
+}
+
+static void debugfs_all_objects_stop(struct seq_file *m, void *v)
+{
+ struct slab_debug_private *priv = m->private;
+ struct kmem_cache *s = priv->inode->i_private;
+
+ kfree(priv->map);
+
+ if (v && (s->flags & SLAB_STORE_USER))
+ put_page(v);
+}
+
+static const struct seq_operations debugfs_all_objects_ops = {
+ .start = debugfs_all_objects_start,
+ .next = debugfs_all_objects_next,
+ .stop = debugfs_all_objects_stop,
+ .show = debugfs_all_objects_show
+};
+
+static int debugfs_all_objects_open(struct inode *inode, struct file *file)
+{
+ struct slab_debug_private *priv = __seq_open_private(file,
+ &debugfs_all_objects_ops, sizeof(struct slab_debug_private));
+
+ if (!priv)
+ return -ENOMEM;
+
+ priv->inode = inode;
+ priv->nid = 0;
+ priv->field = FULL_LIST;
+
+ return 0;
+}
+
+static const struct file_operations debugfs_all_objects_fops = {
+ .open = debugfs_all_objects_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
struct location *l;
@@ -6018,6 +6239,10 @@ static void debugfs_slab_add(struct kmem_cache *s)
debugfs_create_file("free_traces", 0400,
slab_cache_dir, s, &slab_debugfs_fops);

+ debugfs_create_file("all_objects", 0400,
+ slab_cache_dir, s, &debugfs_all_objects_fops);
+
+
if (!unmergeable)
/* Setup first alias */
debugfs_slab_alias(s, s->name);
--
2.31.1.272.g89b43f80a5

2021-05-21 20:15:36

by Oliver Glitta

Subject: [RFC 2/3] mm/slub: sort objects in cache by frequency of stack trace

From: Oliver Glitta <[email protected]>

Sort objects in the slub cache by the frequency of the stack trace
recorded in the object's location, in the alloc_calls and free_calls
implementation in debugfs. The most frequently used stack traces come first.

Signed-off-by: Oliver Glitta <[email protected]>
---
mm/slub.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index d5ed6ed7d68b..247983d647cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -37,6 +37,7 @@
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
+#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>
@@ -5893,6 +5894,17 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
return NULL;
}

+static int cmp_loc_by_count(const void *a, const void *b, const void *data)
+{
+ struct location *loc1 = (struct location *)a;
+ struct location *loc2 = (struct location *)b;
+
+ if (loc1->count > loc2->count)
+ return -1;
+ else
+ return 1;
+}
+
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
struct kmem_cache_node *n;
@@ -5944,6 +5956,11 @@ static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
+
+ /* Sort locations by count */
+ sort_r(t.loc, t.count, sizeof(struct location),
+ cmp_loc_by_count, NULL, NULL);
+
}

if (*ppos < t.count) {
--
2.31.1.272.g89b43f80a5

2021-05-26 14:12:45

by Vlastimil Babka

Subject: Re: [RFC 2/3] mm/slub: sort objects in cache by frequency of stack trace

On 5/21/21 2:11 PM, [email protected] wrote:
> From: Oliver Glitta <[email protected]>
>
> Sort objects in the slub cache by the frequency of the stack trace
> recorded in the object's location, in the alloc_calls and free_calls
> implementation in debugfs. The most frequently used stack traces come first.

That will make it much more convenient.

> Signed-off-by: Oliver Glitta <[email protected]>

Reviewed-by: Vlastimil Babka <[email protected]>

> ---
> mm/slub.c | 17 +++++++++++++++++
> 1 file changed, 17 insertions(+)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index d5ed6ed7d68b..247983d647cd 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -37,6 +37,7 @@
> #include <linux/memcontrol.h>
> #include <linux/random.h>
> #include <kunit/test.h>
> +#include <linux/sort.h>
>
> #include <linux/debugfs.h>
> #include <trace/events/kmem.h>
> @@ -5893,6 +5894,17 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
> return NULL;
> }
>
> +static int cmp_loc_by_count(const void *a, const void *b, const void *data)
> +{
> + struct location *loc1 = (struct location *)a;
> + struct location *loc2 = (struct location *)b;
> +
> + if (loc1->count > loc2->count)
> + return -1;
> + else
> + return 1;
> +}
> +
> static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
> {
> struct kmem_cache_node *n;
> @@ -5944,6 +5956,11 @@ static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
> process_slab(&t, s, page, alloc);
> spin_unlock_irqrestore(&n->list_lock, flags);
> }
> +
> + /* Sort locations by count */
> + sort_r(t.loc, t.count, sizeof(struct location),
> + cmp_loc_by_count, NULL, NULL);
> +
> }
>
> if (*ppos < t.count) {
>

2021-05-26 14:29:17

by Vlastimil Babka

Subject: Re: [RFC 3/3] mm/slub: add all_objects implementation in debugfs

On 5/21/21 2:11 PM, [email protected] wrote:
> From: Oliver Glitta <[email protected]>
>
> Add an all_objects implementation to debugfs to print information
> about all objects in the slub cache.

An example listing of 1-2 objects would be useful in the changelog.

Also, can you describe the guarantees (or limitations) of observing
really all objects if the cache is modified by concurrent allocation and free
operations?

...

> +static void *debugfs_all_objects_start(struct seq_file *m, loff_t *ppos)
> +{
> + struct slab_debug_private *priv = m->private;
> + struct kmem_cache *s = priv->inode->i_private;
> + struct page *page;
> +
> + priv->map = kmalloc(BITS_TO_LONGS(MAX_OBJS_PER_PAGE), GFP_KERNEL);

We can use bitmap_alloc/bitmap_free wrappers and allocate according to objects
per page in the actual kmem_cache, not the theoretical maximum, see:
https://lore.kernel.org/linux-mm/[email protected]/
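
For illustration only (untested sketch, not part of this series;
oo_objects(s->oo) is the cache's objects-per-slab, as used elsewhere
in mm/slub.c), that could look like:

	/* in debugfs_all_objects_start(): */
	priv->map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
	if (!priv->map)
		return NULL;

	/* and in debugfs_all_objects_stop(): */
	bitmap_free(priv->map);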

2021-05-26 18:48:07

by Vlastimil Babka

Subject: Re: [RFC 1/3] mm/slub: aggregate objects in cache by stack trace

On 5/21/21 2:11 PM, [email protected] wrote:
> From: Oliver Glitta <[email protected]>
>
> Aggregate objects in the slub cache by stack trace, in addition to the
> caller address, in the alloc_calls and free_calls implementation in
> debugfs. Add the stack trace to the output.

Yes, the immediate callers alone often don't tell much.

> Add an all_objects implementation to debugfs to print information
> about all objects.

Looks like this sentence applies only to patch 3/3.

> Signed-off-by: Oliver Glitta <[email protected]>

Reviewed-by: Vlastimil Babka <[email protected]>

> ---
> Based on next-20210518 and
> https://lore.kernel.org/r/[email protected]/
>
> mm/slub.c | 30 ++++++++++++++++++++++++++++--
> 1 file changed, 28 insertions(+), 2 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index e9b84eddc50d..d5ed6ed7d68b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4770,6 +4770,7 @@ EXPORT_SYMBOL(validate_slab_cache);
> */
>
> struct location {
> + depot_stack_handle_t handle;
> unsigned long count;
> unsigned long addr;
> long long sum_time;
> @@ -4822,9 +4823,15 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
> {
> long start, end, pos;
> struct location *l;
> - unsigned long caddr;
> + unsigned long caddr, chandle;
> unsigned long age = jiffies - track->when;
> + depot_stack_handle_t handle;
>
> +#ifdef CONFIG_STACKDEPOT
> + handle = READ_ONCE(track->handle);
> +#else
> + handle = 0;
> +#endif
> start = -1;
> end = t->count;
>
> @@ -4839,7 +4846,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
> break;
>
> caddr = t->loc[pos].addr;
> - if (track->addr == caddr) {
> + chandle = t->loc[pos].handle;
> + if ((track->addr == caddr) && (handle == chandle)) {
>
> l = &t->loc[pos];
> l->count++;
> @@ -4864,6 +4872,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
>
> if (track->addr < caddr)
> end = pos;
> + else if (track->addr == caddr && handle < chandle)
> + end = pos;
> else
> start = pos;
> }
> @@ -4886,6 +4896,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
> l->max_time = age;
> l->min_pid = track->pid;
> l->max_pid = track->pid;
> + l->handle = handle;
> cpumask_clear(to_cpumask(l->cpus));
> cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
> nodes_clear(l->nodes);
> @@ -5837,6 +5848,21 @@ static int slab_debugfs_show(struct seq_file *seq, void *v)
> seq_printf(seq, " nodes=%*pbl",
> nodemask_pr_args(&l->nodes));
>
> +#ifdef CONFIG_STACKDEPOT
> + {
> + depot_stack_handle_t handle;
> + unsigned long *entries;
> + unsigned int nr_entries, j;
> +
> + handle = READ_ONCE(l->handle);
> + if (handle) {
> + nr_entries = stack_depot_fetch(handle, &entries);
> + seq_puts(seq, "\n");
> + for (j = 0; j < nr_entries; j++)
> + seq_printf(seq, "\t%pS\n", (void *)entries[j]);
> + }
> + }
> +#endif
> seq_puts(seq, "\n");
> }
>
> --
> 2.31.1.272.g89b43f80a5
>

2021-06-08 08:48:01

by Oliver Glitta

Subject: [RFC 4/4] docs: add description of debugfs files for SLUB cache

From: Oliver Glitta <[email protected]>

Add a description of the debugfs files alloc_traces, free_traces
and all_objects to the SLUB cache documentation.

Signed-off-by: Oliver Glitta <[email protected]>
---
Documentation/vm/slub.rst | 109 ++++++++++++++++++++++++++++++++++++++
1 file changed, 109 insertions(+)

diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
index 03f294a638bd..2280d74d395c 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/vm/slub.rst
@@ -384,5 +384,114 @@ c) Execute ``slabinfo-gnuplot.sh`` in '-t' mode, passing all of the
40,60`` range will plot only samples collected between 40th and
60th seconds).

+
+DebugFS files for SLUB
+======================
+
+For more debug information about the current state of SLUB caches,
+debugfs files are available when the user tracking debug option is
+enabled. There are 3 types of these files, each with different debug
+information:
+
+1. alloc_traces::
+
+ Prints information about the unique allocation traces of
+ the currently allocated objects together with their frequency.
+ The output is sorted by frequency of use.
+
+ Information in the output:
+ Number of uses, allocating function, minimal/average/maximal jiffies since alloc,
+ pid of the allocating process, cpu that allocated the object, and the stack trace.
+
+ Example::
+
+ 1085 populate_error_injection_list+0x97/0x110 age=166678/166680/166682 pid=1 cpus=1
+ __slab_alloc+0x6d/0x90
+ kmem_cache_alloc_trace+0x2eb/0x300
+ populate_error_injection_list+0x97/0x110
+ init_error_injection+0x1b/0x71
+ do_one_initcall+0x5f/0x2d0
+ kernel_init_freeable+0x26f/0x2d7
+ kernel_init+0xe/0x118
+ ret_from_fork+0x22/0x30
+
+
+2. free_traces::
+
+ Prints information about the unique free traces of
+ the currently free objects together with their frequency.
+ The output is sorted by frequency of use.
+
+ Information in the output:
+ Number of uses, freeing function, minimal/average/maximal jiffies since free,
+ pid of the freeing process, cpu that freed the object, and the stack trace.
+
+ Example::
+
+ 51 acpi_ut_update_ref_count+0x6a6/0x782 age=236886/237027/237772 pid=1 cpus=1
+ kfree+0x2db/0x420
+ acpi_ut_update_ref_count+0x6a6/0x782
+ acpi_ut_update_object_reference+0x1ad/0x234
+ acpi_ut_remove_reference+0x7d/0x84
+ acpi_rs_get_prt_method_data+0x97/0xd6
+ acpi_get_irq_routing_table+0x82/0xc4
+ acpi_pci_irq_find_prt_entry+0x8e/0x2e0
+ acpi_pci_irq_lookup+0x3a/0x1e0
+ acpi_pci_irq_enable+0x77/0x240
+ pcibios_enable_device+0x39/0x40
+ do_pci_enable_device.part.0+0x5d/0xe0
+ pci_enable_device_flags+0xfc/0x120
+ pci_enable_device+0x13/0x20
+ virtio_pci_probe+0x9e/0x170
+ local_pci_probe+0x48/0x80
+ pci_device_probe+0x105/0x1c0
+
+
+3. all_objects::
+
+ Prints information about all objects (both allocated and free)
+ in the given SLUB cache.
+
+ Example::
+
+ Object: 0000000042ee8b00 free
+ Last allocated: ima_queue_key+0x2f/0x1b0 age=247112 pid=1 cpu=1
+ __slab_alloc+0x6d/0x90
+ kmem_cache_alloc_trace+0x2eb/0x300
+ ima_queue_key+0x2f/0x1b0
+ ima_post_key_create_or_update+0x46/0x80
+ key_create_or_update+0x383/0x5b0
+ load_certificate_list+0x75/0xa0
+ load_system_certificate_list+0x2f/0x31
+ do_one_initcall+0x5f/0x2d0
+ kernel_init_freeable+0x26f/0x2d7
+ kernel_init+0xe/0x118
+ ret_from_fork+0x22/0x30
+ Last free: ima_process_queued_keys.part.0+0x84/0xf0 age=170962 pid=137 cpu=1
+ kfree+0x2db/0x420
+ ima_process_queued_keys.part.0+0x84/0xf0
+ ima_keys_handler+0x57/0x60
+ process_one_work+0x2a5/0x590
+ worker_thread+0x52/0x3f0
+ kthread+0x140/0x160
+ ret_from_fork+0x22/0x30
+
+ Information in the output:
+
+ a) Object: <address> allocated/free
+ The address of the object and whether it is currently
+ allocated or free.
+
+ b) Last allocated: <address of user> age=<jiffies since alloc> pid=<pid of
+ the process> cpu=<allocating cpu>
+
+ c) Allocation stack trace
+
+ d) Last free: <address of user> age=<jiffies since freed> pid=<pid of
+ the process> cpu=<freeing cpu>
+
+ e) Free stack trace
+
+
Christoph Lameter, May 30, 2007
Sergey Senozhatsky, October 23, 2015
--
2.31.1.272.g89b43f80a5

2021-06-14 00:23:06

by David Rientjes

Subject: Re: [RFC 4/4] docs: add description of debugfs files for SLUB cache

On Tue, 8 Jun 2021, [email protected] wrote:

> From: Oliver Glitta <[email protected]>
>
> Add a description of the debugfs files alloc_traces, free_traces
> and all_objects to the SLUB cache documentation.
>
> Signed-off-by: Oliver Glitta <[email protected]>

This looks good to me, thanks Oliver.

Adding in Randy Dunlap as well for Documentation changes if he has a
chance to take a look.

Acked-by: David Rientjes <[email protected]>