2019-03-11 01:16:32

by Tobin C. Harding

Subject: [PATCH 0/4] mm: Use slab_list list_head instead of lru

Currently the slab allocators (ab)use the struct page 'lru' list_head.
We have a list head for slab allocators to use, 'slab_list'.

Clean up all three allocators by using the 'slab_list' list_head instead
of overloading the 'lru' list_head.

The initial patch makes no code changes; it only adds comments to
#endif statements.

The final 3 patches make the change, one patch per allocator. Each was
tested by building and booting (in QEMU) after configuring the kernel
to use the appropriate allocator, and also by building and booting with
debug options enabled (for slab and slub).


thanks,
Tobin.

Tobin C. Harding (4):
slub: Add comments to endif pre-processor macros
slub: Use slab_list instead of lru
slab: Use slab_list instead of lru
slob: Use slab_list instead of lru

mm/slab.c | 49 +++++++++++++++++++++++----------------------
mm/slob.c | 10 +++++-----
mm/slub.c | 60 +++++++++++++++++++++++++++----------------------------
3 files changed, 60 insertions(+), 59 deletions(-)

--
2.21.0



2019-03-11 01:17:16

by Tobin C. Harding

Subject: [PATCH 1/4] slub: Add comments to endif pre-processor macros

The SLUB allocator makes heavy use of ifdef/endif pre-processor macros.
The pairing of these statements is at times hard to follow, e.g. if the
pair is more than a screen apart or if there are nested pairs. We can
reduce cognitive load by adding a comment to the endif statement of the
form

#ifdef CONFIG_FOO
...
#endif /* CONFIG_FOO */

Add comments to endif pre-processor macros where the ifdef/endif
pairing is not immediately apparent.
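
The gain is clearest with nested pairs, for example (CONFIG_FOO and
CONFIG_BAR here are only placeholder config names, not real options):

#ifdef CONFIG_FOO
...
#ifdef CONFIG_BAR
...
#endif /* CONFIG_BAR */
...
#endif /* CONFIG_FOO */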

Signed-off-by: Tobin C. Harding <[email protected]>
---
mm/slub.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbcb7e61..b282e22885cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1951,7 +1951,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
}
}
} while (read_mems_allowed_retry(cpuset_mems_cookie));
-#endif
+#endif /* CONFIG_NUMA */
return NULL;
}

@@ -2249,7 +2249,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
-#endif
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
}

/*
@@ -2308,7 +2308,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
local_irq_restore(flags);
}
preempt_enable();
-#endif
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
}

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2813,7 +2813,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
-#endif
+#endif /* CONFIG_NUMA */

/*
* Slow path handling. This may still be called frequently since objects
@@ -3845,7 +3845,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-#endif
+#endif /* CONFIG_NUMA */

#ifdef CONFIG_HARDENED_USERCOPY
/*
@@ -4063,7 +4063,7 @@ void __kmemcg_cache_deactivate(struct kmem_cache *s)
*/
slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
}
-#endif
+#endif /* CONFIG_MEMCG */

static int slab_mem_going_offline_callback(void *arg)
{
@@ -4696,7 +4696,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
len += sprintf(buf, "No data\n");
return len;
}
-#endif
+#endif /* CONFIG_SLUB_DEBUG */

#ifdef SLUB_RESILIENCY_TEST
static void __init resiliency_test(void)
@@ -4756,7 +4756,7 @@ static void __init resiliency_test(void)
#ifdef CONFIG_SYSFS
static void resiliency_test(void) {};
#endif
-#endif
+#endif /* SLUB_RESILIENCY_TEST */

#ifdef CONFIG_SYSFS
enum slab_stat_type {
@@ -5413,7 +5413,7 @@ STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
-#endif
+#endif /* CONFIG_SLUB_STATS */

static struct attribute *slab_attrs[] = {
&slab_size_attr.attr,
@@ -5614,7 +5614,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)

if (buffer)
free_page((unsigned long)buffer);
-#endif
+#endif /* CONFIG_MEMCG */
}

static void kmem_cache_release(struct kobject *k)
--
2.21.0


2019-03-11 01:24:56

by Tobin C. Harding

Subject: [PATCH 2/4] slub: Use slab_list instead of lru

Currently we use the page->lru list for maintaining lists of slabs. We
have a list in the page structure (slab_list) that can be used for this
purpose. Using it makes the code cleaner since we are not overloading
the lru list.

Use the slab_list instead of the lru list for maintaining lists of
slabs.

Signed-off-by: Tobin C. Harding <[email protected]>
---
mm/slub.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index b282e22885cd..d692b5e0163d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1023,7 +1023,7 @@ static void add_full(struct kmem_cache *s,
return;

lockdep_assert_held(&n->list_lock);
- list_add(&page->lru, &n->full);
+ list_add(&page->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1032,7 +1032,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
return;

lockdep_assert_held(&n->list_lock);
- list_del(&page->lru);
+ list_del(&page->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
@@ -1773,9 +1773,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
- list_add_tail(&page->lru, &n->partial);
+ list_add_tail(&page->slab_list, &n->partial);
else
- list_add(&page->lru, &n->partial);
+ list_add(&page->slab_list, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
@@ -1789,7 +1789,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
lockdep_assert_held(&n->list_lock);
- list_del(&page->lru);
+ list_del(&page->slab_list);
n->nr_partial--;
}

@@ -1863,7 +1863,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
return NULL;

spin_lock(&n->list_lock);
- list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
void *t;

if (!pfmemalloc_match(page, flags))
@@ -2407,7 +2407,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
struct page *page;

spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
x += get_count(page);
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
@@ -3702,10 +3702,10 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)

BUG_ON(irqs_disabled());
spin_lock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &n->partial, lru) {
+ list_for_each_entry_safe(page, h, &n->partial, slab_list) {
if (!page->inuse) {
remove_partial(n, page);
- list_add(&page->lru, &discard);
+ list_add(&page->slab_list, &discard);
} else {
list_slab_objects(s, page,
"Objects remaining in %s on __kmem_cache_shutdown()");
@@ -3713,7 +3713,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
}
spin_unlock_irq(&n->list_lock);

- list_for_each_entry_safe(page, h, &discard, lru)
+ list_for_each_entry_safe(page, h, &discard, slab_list)
discard_slab(s, page);
}

@@ -3993,7 +3993,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
* Note that concurrent frees may occur while we hold the
* list_lock. page->inuse here is the upper limit.
*/
- list_for_each_entry_safe(page, t, &n->partial, lru) {
+ list_for_each_entry_safe(page, t, &n->partial, slab_list) {
int free = page->objects - page->inuse;

/* Do not reread page->inuse */
@@ -4003,10 +4003,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
BUG_ON(free <= 0);

if (free == page->objects) {
- list_move(&page->lru, &discard);
+ list_move(&page->slab_list, &discard);
n->nr_partial--;
} else if (free <= SHRINK_PROMOTE_MAX)
- list_move(&page->lru, promote + free - 1);
+ list_move(&page->slab_list, promote + free - 1);
}

/*
@@ -4019,7 +4019,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
spin_unlock_irqrestore(&n->list_lock, flags);

/* Release empty slabs */
- list_for_each_entry_safe(page, t, &discard, lru)
+ list_for_each_entry_safe(page, t, &discard, slab_list)
discard_slab(s, page);

if (slabs_node(s, node))
@@ -4211,11 +4211,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
for_each_kmem_cache_node(s, node, n) {
struct page *p;

- list_for_each_entry(p, &n->partial, lru)
+ list_for_each_entry(p, &n->partial, slab_list)
p->slab_cache = s;

#ifdef CONFIG_SLUB_DEBUG
- list_for_each_entry(p, &n->full, lru)
+ list_for_each_entry(p, &n->full, slab_list)
p->slab_cache = s;
#endif
}
@@ -4432,7 +4432,7 @@ static int validate_slab_node(struct kmem_cache *s,

spin_lock_irqsave(&n->list_lock, flags);

- list_for_each_entry(page, &n->partial, lru) {
+ list_for_each_entry(page, &n->partial, slab_list) {
validate_slab_slab(s, page, map);
count++;
}
@@ -4443,7 +4443,7 @@ static int validate_slab_node(struct kmem_cache *s,
if (!(s->flags & SLAB_STORE_USER))
goto out;

- list_for_each_entry(page, &n->full, lru) {
+ list_for_each_entry(page, &n->full, slab_list) {
validate_slab_slab(s, page, map);
count++;
}
@@ -4639,9 +4639,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
continue;

spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
process_slab(&t, s, page, alloc, map);
- list_for_each_entry(page, &n->full, lru)
+ list_for_each_entry(page, &n->full, slab_list)
process_slab(&t, s, page, alloc, map);
spin_unlock_irqrestore(&n->list_lock, flags);
}
--
2.21.0


2019-03-11 01:27:43

by Tobin C. Harding

Subject: [PATCH 4/4] slob: Use slab_list instead of lru

Currently we use the page->lru list for maintaining lists of slabs. We
have a list in the page structure (slab_list) that can be used for this
purpose. Using it makes the code cleaner since we are not overloading
the lru list.

Use the slab_list instead of the lru list for maintaining lists of
slabs.

Signed-off-by: Tobin C. Harding <[email protected]>
---
mm/slob.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/slob.c b/mm/slob.c
index 307c2c9feb44..ee68ff2a2833 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -112,13 +112,13 @@ static inline int slob_page_free(struct page *sp)

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
- list_add(&sp->lru, list);
+ list_add(&sp->slab_list, list);
__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
- list_del(&sp->lru);
+ list_del(&sp->slab_list);
__ClearPageSlobFree(sp);
}

@@ -283,7 +283,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)

spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, slob_list, lru) {
+ list_for_each_entry(sp, slob_list, slab_list) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
@@ -297,7 +297,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
continue;

/* Attempt to alloc */
- prev = sp->lru.prev;
+ prev = sp->slab_list.prev;
b = slob_page_alloc(sp, size, align);
if (!b)
continue;
@@ -323,7 +323,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
sp->freelist = b;
- INIT_LIST_HEAD(&sp->lru);
+ INIT_LIST_HEAD(&sp->slab_list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
--
2.21.0


2019-03-11 02:32:36

by Tobin C. Harding

Subject: [PATCH 3/4] slab: Use slab_list instead of lru

Currently we use the page->lru list for maintaining lists of slabs. We
have a list in the page structure (slab_list) that can be used for this
purpose. Using it makes the code cleaner since we are not overloading
the lru list.

Use the slab_list instead of the lru list for maintaining lists of
slabs.

Signed-off-by: Tobin C. Harding <[email protected]>
---
mm/slab.c | 49 +++++++++++++++++++++++++------------------------
1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..09cc64ef9613 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1710,8 +1710,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
struct page *page, *n;

- list_for_each_entry_safe(page, n, list, lru) {
- list_del(&page->lru);
+ list_for_each_entry_safe(page, n, list, slab_list) {
+ list_del(&page->slab_list);
slab_destroy(cachep, page);
}
}
@@ -2265,8 +2265,8 @@ static int drain_freelist(struct kmem_cache *cache,
goto out;
}

- page = list_entry(p, struct page, lru);
- list_del(&page->lru);
+ page = list_entry(p, struct page, slab_list);
+ list_del(&page->slab_list);
n->free_slabs--;
n->total_slabs--;
/*
@@ -2726,13 +2726,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
if (!page)
return;

- INIT_LIST_HEAD(&page->lru);
+ INIT_LIST_HEAD(&page->slab_list);
n = get_node(cachep, page_to_nid(page));

spin_lock(&n->list_lock);
n->total_slabs++;
if (!page->active) {
- list_add_tail(&page->lru, &(n->slabs_free));
+ list_add_tail(&page->slab_list, &n->slabs_free);
n->free_slabs++;
} else
fixup_slab_list(cachep, n, page, &list);
@@ -2841,9 +2841,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
void **list)
{
/* move slabp to correct slabp list: */
- list_del(&page->lru);
+ list_del(&page->slab_list);
if (page->active == cachep->num) {
- list_add(&page->lru, &n->slabs_full);
+ list_add(&page->slab_list, &n->slabs_full);
if (OBJFREELIST_SLAB(cachep)) {
#if DEBUG
/* Poisoning will be done without holding the lock */
@@ -2857,7 +2857,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
page->freelist = NULL;
}
} else
- list_add(&page->lru, &n->slabs_partial);
+ list_add(&page->slab_list, &n->slabs_partial);
}

/* Try to find non-pfmemalloc slab if needed */
@@ -2880,20 +2880,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
}

/* Move pfmemalloc slab to the end of list to speed up next search */
- list_del(&page->lru);
+ list_del(&page->slab_list);
if (!page->active) {
- list_add_tail(&page->lru, &n->slabs_free);
+ list_add_tail(&page->slab_list, &n->slabs_free);
n->free_slabs++;
} else
- list_add_tail(&page->lru, &n->slabs_partial);
+ list_add_tail(&page->slab_list, &n->slabs_partial);

- list_for_each_entry(page, &n->slabs_partial, lru) {
+ list_for_each_entry(page, &n->slabs_partial, slab_list) {
if (!PageSlabPfmemalloc(page))
return page;
}

n->free_touched = 1;
- list_for_each_entry(page, &n->slabs_free, lru) {
+ list_for_each_entry(page, &n->slabs_free, slab_list) {
if (!PageSlabPfmemalloc(page)) {
n->free_slabs--;
return page;
@@ -2908,11 +2908,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
struct page *page;

assert_spin_locked(&n->list_lock);
- page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
+ slab_list);
if (!page) {
n->free_touched = 1;
page = list_first_entry_or_null(&n->slabs_free, struct page,
- lru);
+ slab_list);
if (page)
n->free_slabs--;
}
@@ -3413,29 +3414,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
objp = objpp[i];

page = virt_to_head_page(objp);
- list_del(&page->lru);
+ list_del(&page->slab_list);
check_spinlock_acquired_node(cachep, node);
slab_put_obj(cachep, page, objp);
STATS_DEC_ACTIVE(cachep);

/* fixup slab chains */
if (page->active == 0) {
- list_add(&page->lru, &n->slabs_free);
+ list_add(&page->slab_list, &n->slabs_free);
n->free_slabs++;
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
- list_add_tail(&page->lru, &n->slabs_partial);
+ list_add_tail(&page->slab_list, &n->slabs_partial);
}
}

while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
n->free_objects -= cachep->num;

- page = list_last_entry(&n->slabs_free, struct page, lru);
- list_move(&page->lru, list);
+ page = list_last_entry(&n->slabs_free, struct page, slab_list);
+ list_move(&page->slab_list, list);
n->free_slabs--;
n->total_slabs--;
}
@@ -3473,7 +3474,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
int i = 0;
struct page *page;

- list_for_each_entry(page, &n->slabs_free, lru) {
+ list_for_each_entry(page, &n->slabs_free, slab_list) {
BUG_ON(page->active);

i++;
@@ -4336,9 +4337,9 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&n->list_lock);

- list_for_each_entry(page, &n->slabs_full, lru)
+ list_for_each_entry(page, &n->slabs_full, slab_list)
handle_slab(x, cachep, page);
- list_for_each_entry(page, &n->slabs_partial, lru)
+ list_for_each_entry(page, &n->slabs_partial, slab_list)
handle_slab(x, cachep, page);
spin_unlock_irq(&n->list_lock);
}
--
2.21.0


2019-03-11 20:51:08

by Roman Gushchin

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 12:07:40PM +1100, Tobin C. Harding wrote:
> Currently the slab allocators (ab)use the struct page 'lru' list_head.
> We have a list head for slab allocators to use, 'slab_list'.
>
> Clean up all three allocators by using the 'slab_list' list_head instead
> of overloading the 'lru' list_head.
>
> Initial patch makes no code changes, adds comments to #endif statements.
>
> Final 3 patches do changes as a patch per allocator, tested by building
> and booting (in Qemu) after configuring kernel to use appropriate
> allocator. Also build and boot with debug options enabled (for slab
> and slub).

Hi Tobin!

The patchset looks good to me, however I'd add some clarification of
why switching from lru to slab_list is safe.

My understanding is that the slab_list field isn't currently in use,
but it's not that obvious that putting the slab_list and
next/pages/pobjects fields into a union is safe (for the slub case).

Please add a clarification/comment.

For patches 1, 3 and 4:
Reviewed-by: Roman Gushchin <[email protected]>

Thanks!

2019-03-11 23:18:51

by Matthew Wilcox

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> The patchset looks good to me, however I'd add some clarifications
> why switching from lru to slab_list is safe.
>
> My understanding is that the slab_list fields isn't currently in use,
> but it's not that obvious that putting slab_list and next/pages/pobjects
> fields into a union is safe (for the slub case).

It's already in a union.

struct page {
union {
struct { /* Page cache and anonymous pages */
struct list_head lru;
...
struct { /* slab, slob and slub */
union {
struct list_head slab_list; /* uses lru */
struct { /* Partial pages */
struct page *next;

slab_list and lru are in the same bits. Once this patch set is in,
we can remove the enigmatic 'uses lru' comment that I added.
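
If it helps to see that concretely, here is a minimal userspace sketch
(toy_page is an illustrative stand-in, not the kernel's struct page, and
the member types are simplified) showing that members of a union share
an offset, so walking a list via ->slab_list touches exactly the same
bits as walking it via ->lru:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

/*
 * Toy layout only, not the kernel's struct page. It mirrors just enough
 * of the union nesting to show that 'lru' and 'slab_list' name the same
 * storage.
 */
struct toy_page {
    union {
        struct list_head lru;               /* page cache and anon pages */
        struct {                            /* slab, slob and slub */
            union {
                struct list_head slab_list;
                struct {                    /* partial pages (slub) */
                    struct toy_page *next;
                    int pages;
                    int pobjects;
                };
            };
        };
    };
};

int main(void)
{
    /* Members of a union share an offset, so the two names alias. */
    assert(offsetof(struct toy_page, lru) ==
           offsetof(struct toy_page, slab_list));
    printf("lru at %zu, slab_list at %zu\n",
           offsetof(struct toy_page, lru),
           offsetof(struct toy_page, slab_list));
    return 0;
}

It needs a C11 compiler (anonymous structs/unions); the assertion can
only trip if the two members stop sharing storage.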

2019-03-12 00:23:35

by Roman Gushchin

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 04:16:33PM -0700, Matthew Wilcox wrote:
> On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> > The patchset looks good to me, however I'd add some clarifications
> > why switching from lru to slab_list is safe.
> >
> > My understanding is that the slab_list fields isn't currently in use,
> > but it's not that obvious that putting slab_list and next/pages/pobjects
> > fields into a union is safe (for the slub case).
>
> It's already in a union.
>
> struct page {
> union {
> struct { /* Page cache and anonymous pages */
> struct list_head lru;
> ...
> struct { /* slab, slob and slub */
> union {
> struct list_head slab_list; /* uses lru */
> struct { /* Partial pages */
> struct page *next;
>
> slab_list and lru are in the same bits. Once this patch set is in,
> we can remove the enigmatic 'uses lru' comment that I added.

Ah, perfect, thanks! Makes total sense then.

Tobin, can you, please, add a note to the commit message?
With the note:
Reviewed-by: Roman Gushchin <[email protected]>

Thank you!

2019-03-12 01:07:01

by Tobin C. Harding

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 04:16:33PM -0700, Matthew Wilcox wrote:
> On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> > The patchset looks good to me, however I'd add some clarifications
> > why switching from lru to slab_list is safe.
> >
> > My understanding is that the slab_list fields isn't currently in use,
> > but it's not that obvious that putting slab_list and next/pages/pobjects
> > fields into a union is safe (for the slub case).
>
> It's already in a union.
>
> struct page {
> union {
> struct { /* Page cache and anonymous pages */
> struct list_head lru;
> ...
> struct { /* slab, slob and slub */
> union {
> struct list_head slab_list; /* uses lru */
> struct { /* Partial pages */
> struct page *next;
>
> slab_list and lru are in the same bits. Once this patch set is in,
> we can remove the enigmatic 'uses lru' comment that I added.

Funny you should say this, it came to me today while daydreaming that I
should have removed that comment :)

I'll remove it in v2.

thanks,
Tobin.

2019-03-12 01:08:11

by Tobin C. Harding

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Tue, Mar 12, 2019 at 12:22:23AM +0000, Roman Gushchin wrote:
> On Mon, Mar 11, 2019 at 04:16:33PM -0700, Matthew Wilcox wrote:
> > On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> > > The patchset looks good to me, however I'd add some clarifications
> > > why switching from lru to slab_list is safe.
> > >
> > > My understanding is that the slab_list fields isn't currently in use,
> > > but it's not that obvious that putting slab_list and next/pages/pobjects
> > > fields into a union is safe (for the slub case).
> >
> > It's already in a union.
> >
> > struct page {
> > union {
> > struct { /* Page cache and anonymous pages */
> > struct list_head lru;
> > ...
> > struct { /* slab, slob and slub */
> > union {
> > struct list_head slab_list; /* uses lru */
> > struct { /* Partial pages */
> > struct page *next;
> >
> > slab_list and lru are in the same bits. Once this patch set is in,
> > we can remove the enigmatic 'uses lru' comment that I added.
>
> Ah, perfect, thanks! Makes total sense then.
>
> Tobin, can you, please, add a note to the commit message?
> With the note:
> Reviewed-by: Roman Gushchin <[email protected]>

Thanks for the review Roman, will add tag to v2.

Tobin.

2019-03-12 02:03:25

by Tobin C. Harding

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Tue, Mar 12, 2019 at 12:22:23AM +0000, Roman Gushchin wrote:
> On Mon, Mar 11, 2019 at 04:16:33PM -0700, Matthew Wilcox wrote:
> > On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> > > The patchset looks good to me, however I'd add some clarifications
> > > why switching from lru to slab_list is safe.
> > >
> > > My understanding is that the slab_list fields isn't currently in use,
> > > but it's not that obvious that putting slab_list and next/pages/pobjects
> > > fields into a union is safe (for the slub case).
> >
> > It's already in a union.
> >
> > struct page {
> > union {
> > struct { /* Page cache and anonymous pages */
> > struct list_head lru;
> > ...
> > struct { /* slab, slob and slub */
> > union {
> > struct list_head slab_list; /* uses lru */
> > struct { /* Partial pages */
> > struct page *next;
> >
> > slab_list and lru are in the same bits. Once this patch set is in,
> > we can remove the enigmatic 'uses lru' comment that I added.
>
> Ah, perfect, thanks! Makes total sense then.
>
> Tobin, can you, please, add a note to the commit message?
> With the note:
> Reviewed-by: Roman Gushchin <[email protected]>

Awesome, thanks. That's for all 4 patches or excluding 2?

thanks,
Tobin.

2019-03-12 02:39:21

by Matthew Wilcox

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Tue, Mar 12, 2019 at 12:05:54PM +1100, Tobin C. Harding wrote:
> > slab_list and lru are in the same bits. Once this patch set is in,
> > we can remove the enigmatic 'uses lru' comment that I added.
>
> Funny you should say this, I came to me today while daydreaming that I
> should have removed that comment :)
>
> I'll remove it in v2.

That's great. BTW, something else you could do to verify this patch
set is check that the object file is unchanged before/after the patch.
I tend to use 'objdump -dr' to produce before.s and after.s and 'diff'
to compare the two.

2019-03-12 03:54:47

by Tobin C. Harding

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 07:38:28PM -0700, Matthew Wilcox wrote:
> On Tue, Mar 12, 2019 at 12:05:54PM +1100, Tobin C. Harding wrote:
> > > slab_list and lru are in the same bits. Once this patch set is in,
> > > we can remove the enigmatic 'uses lru' comment that I added.
> >
> > Funny you should say this, I came to me today while daydreaming that I
> > should have removed that comment :)
> >
> > I'll remove it in v2.
>
> That's great. BTW, something else you could do to verify this patch
> set is check that the object file is unchanged before/after the patch.
> I tend to use 'objdump -dr' to before.s and after.s and use 'diff'
> to compare the two.

Oh cool, I didn't know to do that. I'm not super familiar with the use
of unions, having never had need to use one myself, so please share any
other union-related tips you think of.

thanks,
Tobin.

2019-03-12 17:23:31

by Roman Gushchin

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Tue, Mar 12, 2019 at 01:01:53PM +1100, Tobin C. Harding wrote:
> On Tue, Mar 12, 2019 at 12:22:23AM +0000, Roman Gushchin wrote:
> > On Mon, Mar 11, 2019 at 04:16:33PM -0700, Matthew Wilcox wrote:
> > > On Mon, Mar 11, 2019 at 08:49:23PM +0000, Roman Gushchin wrote:
> > > > The patchset looks good to me, however I'd add some clarifications
> > > > why switching from lru to slab_list is safe.
> > > >
> > > > My understanding is that the slab_list fields isn't currently in use,
> > > > but it's not that obvious that putting slab_list and next/pages/pobjects
> > > > fields into a union is safe (for the slub case).
> > >
> > > It's already in a union.
> > >
> > > struct page {
> > > union {
> > > struct { /* Page cache and anonymous pages */
> > > struct list_head lru;
> > > ...
> > > struct { /* slab, slob and slub */
> > > union {
> > > struct list_head slab_list; /* uses lru */
> > > struct { /* Partial pages */
> > > struct page *next;
> > >
> > > slab_list and lru are in the same bits. Once this patch set is in,
> > > we can remove the enigmatic 'uses lru' comment that I added.
> >
> > Ah, perfect, thanks! Makes total sense then.
> >
> > Tobin, can you, please, add a note to the commit message?
> > With the note:
> > Reviewed-by: Roman Gushchin <[email protected]>
>
> Awesome, thanks. That's for all 4 patches or excluding 2?

To all 4, given that you'll add some explanations to the commit message.

Thanks!

2019-03-12 17:26:34

by Roman Gushchin

Subject: Re: [PATCH 0/4] mm: Use slab_list list_head instead of lru

On Mon, Mar 11, 2019 at 07:38:28PM -0700, Matthew Wilcox wrote:
> On Tue, Mar 12, 2019 at 12:05:54PM +1100, Tobin C. Harding wrote:
> > > slab_list and lru are in the same bits. Once this patch set is in,
> > > we can remove the enigmatic 'uses lru' comment that I added.
> >
> > Funny you should say this, I came to me today while daydreaming that I
> > should have removed that comment :)
> >
> > I'll remove it in v2.
>
> That's great. BTW, something else you could do to verify this patch
> set is check that the object file is unchanged before/after the patch.
> I tend to use 'objdump -dr' to before.s and after.s and use 'diff'
> to compare the two.

Btw, is it guaranteed that the object file will not change?
I was about to recommend the same, but was not sure whether such a
change could cause gcc to generate *slightly* different object code.

Thanks!