From: Jiri Kosina <[email protected]>
The list_lock can be taken in hardirq context when do_drain() is being
called via IPI on all cores, and therefore lockdep complains about it,
because a spinlock_t cannot be acquired from hardirq context on
PREEMPT_RT, where it is a sleeping lock.
That's not a real issue, as SLAB can't be built on PREEMPT_RT anyway, but
we still want to get rid of the warning on non-PREEMPT_RT builds.
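The exclusion comes from the slab allocator choice in mm/Kconfig
(paraphrased sketch):

config SLAB
	bool "SLAB"
	depends on !PREEMPT_RT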
Annotate it therefore as a raw lock. This gets rid of the lockdep warning
below.
=============================
[ BUG: Invalid wait context ]
6.1.0-rc1-00134-ge35184f32151 #4 Not tainted
-----------------------------
swapper/3/0 is trying to lock:
ffff8bc88086dc18 (&parent->list_lock){..-.}-{3:3}, at: do_drain+0x57/0xb0
other info that might help us debug this:
context-{2:2}
no locks held by swapper/3/0.
stack backtrace:
CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.1.0-rc1-00134-ge35184f32151 #4
Call Trace:
<IRQ>
dump_stack_lvl+0x6b/0x9d
__lock_acquire+0x1519/0x1730
? build_sched_domains+0x4bd/0x1590
? __lock_acquire+0xad2/0x1730
lock_acquire+0x294/0x340
? do_drain+0x57/0xb0
? sched_clock_tick+0x41/0x60
_raw_spin_lock+0x2c/0x40
? do_drain+0x57/0xb0
do_drain+0x57/0xb0
__flush_smp_call_function_queue+0x138/0x220
__sysvec_call_function+0x4f/0x210
sysvec_call_function+0x4b/0x90
</IRQ>
<TASK>
asm_sysvec_call_function+0x16/0x20
RIP: 0010:mwait_idle+0x5e/0x80
Code: 31 d2 65 48 8b 04 25 80 ed 01 00 48 89 d1 0f 01 c8 48 8b 00 a8 08 75 14 66 90 0f 00 2d 0b 78 46 00 31 c0 48 89 c1 fb 0f 01 c9 <eb> 06 fb 0f 1f 44 00 00 65 48 8b 04 25 80 ed 01 00 f0 80 60 02 df
RSP: 0000:ffffa90940217ee0 EFLAGS: 00000246
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff9bb9f93a
RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000001
R10: ffffa90940217ea8 R11: 0000000000000000 R12: ffffffffffffffff
R13: 0000000000000000 R14: ffff8bc88127c500 R15: 0000000000000000
? default_idle_call+0x1a/0xa0
default_idle_call+0x4b/0xa0
do_idle+0x1f1/0x2c0
? _raw_spin_unlock_irqrestore+0x56/0x70
cpu_startup_entry+0x19/0x20
start_secondary+0x122/0x150
secondary_startup_64_no_verify+0xce/0xdb
</TASK>
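The trace shows list_lock being acquired from the hardirq path of the
IPI handler (do_drain() under sysvec_call_function). A minimal sketch of
that pattern, with hypothetical names; a raw_spinlock_t remains a true
spinning lock in all configurations, so taking it there is a valid wait
context:

static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical lock */

static void demo_drain(void *info)
{
	/* Runs in hardirq context when delivered as an IPI. */
	raw_spin_lock(&demo_lock);	/* never sleeps, valid here */
	/* ... drain per-CPU state ... */
	raw_spin_unlock(&demo_lock);
}

/* analogous to how drain_cpu_caches() drives do_drain(): */
/* on_each_cpu(demo_drain, NULL, 1); */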
Signed-off-by: Jiri Kosina <[email protected]>
---
mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
mm/slab.h | 2 +-
2 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 59c8e28f7b6a..d8a287900193 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
- spin_lock_init(&parent->list_lock);
+ raw_spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
}
@@ -559,9 +559,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
slab_node = slab_nid(slab);
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -684,7 +684,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
@@ -695,7 +695,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
}
}
@@ -768,9 +768,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
@@ -811,10 +811,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
*/
n = get_node(cachep, node);
if (n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
return 0;
}
@@ -893,7 +893,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
goto fail;
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
@@ -911,7 +911,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
new_alien = NULL;
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
/*
@@ -950,7 +950,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
@@ -961,7 +961,7 @@ static void cpuup_canceled(long cpu)
nc->avail = 0;
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto free_slab;
}
@@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
kfree(shared);
if (alien) {
@@ -1159,7 +1159,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
- spin_lock_init(&ptr->list_lock);
+ raw_spin_lock_init(&ptr->list_lock);
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
@@ -1330,11 +1330,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
total_slabs = n->total_slabs;
free_slabs = n->free_slabs;
free_objs = n->free_objects;
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
@@ -2096,7 +2096,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}
@@ -2104,7 +2104,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, node)->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}
@@ -2144,9 +2144,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail = 0;
slabs_destroy(cachep, &list);
}
@@ -2164,9 +2164,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, n->shared, node, true, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -2188,10 +2188,10 @@ static int drain_freelist(struct kmem_cache *cache,
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
p = n->slabs_free.prev;
if (p == &n->slabs_free) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto out;
}
@@ -2204,7 +2204,7 @@ static int drain_freelist(struct kmem_cache *cache,
* to the cache.
*/
n->free_objects -= cache->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slab_destroy(cache, slab);
nr_freed++;
}
@@ -2629,7 +2629,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
INIT_LIST_HEAD(&slab->slab_list);
n = get_node(cachep, slab_nid(slab));
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!slab->active) {
list_add_tail(&slab->slab_list, &n->slabs_free);
@@ -2639,7 +2639,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - slab->active;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
}
@@ -2805,7 +2805,7 @@ static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
struct slab *slab;
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
slab_list);
if (!slab) {
@@ -2832,10 +2832,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, true);
if (!slab) {
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return NULL;
}
@@ -2844,7 +2844,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
@@ -2903,7 +2903,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
@@ -2927,7 +2927,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
must_grow:
n->free_objects -= ac->avail;
alloc_done:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
direct_grow:
@@ -3147,7 +3147,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
BUG_ON(!n);
check_irq_off();
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, false);
if (!slab)
goto must_grow;
@@ -3165,12 +3165,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
must_grow:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (slab) {
/* This slab isn't counted yet so don't update free_objects */
@@ -3325,7 +3325,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
check_irq_off();
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
@@ -3354,7 +3354,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
STATS_SET_FREEABLE(cachep, i);
}
#endif
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
slabs_destroy(cachep, &list);
@@ -3721,9 +3721,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
free_percpu(prev);
@@ -3815,9 +3815,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
return;
}
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, ac, node, false, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -3901,7 +3901,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
@@ -3910,7 +3910,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
if (n->shared)
shared_avail += n->shared->avail;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
diff --git a/mm/slab.h b/mm/slab.h
index 0202a8c2f0d2..7e0bdd7773f0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -750,7 +750,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
* The slab lists for all objects.
*/
struct kmem_cache_node {
- spinlock_t list_lock;
+ raw_spinlock_t list_lock;
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
--
2.35.3
Hi Jiri,
I love your patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/nycvar.YFH.7.76.2210211643330.29912%40cbobk.fhfr.pm
patch subject: [PATCH] mm/slab: Annotate kmem_cache_node->list_lock as raw
config: x86_64-rhel-8.3-func
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/709b8ca6934319e9e8e0519baa3ac2bb4f634451
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
git checkout 709b8ca6934319e9e8e0519baa3ac2bb4f634451
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2946:9: note: in expansion of macro 'spin_lock_irqsave'
2946 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:2949:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
2949 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c: In function '__slab_free':
mm/slub.c:3483:48: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3483 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c:3515:51: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
3515 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:3515:33: note: in expansion of macro 'spin_lock_irqsave'
3515 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:3557:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3557 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c:3572:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3572 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c: In function 'init_kmem_cache_node':
mm/slub.c:4008:24: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4008 | spin_lock_init(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:342:24: note: in definition of macro 'spin_lock_init'
342 | spinlock_check(_lock); \
| ^~~~~
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/spinlock.h:88,
from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
>> include/linux/spinlock_types.h:41:9: error: incompatible types when assigning to type 'raw_spinlock_t' {aka 'struct raw_spinlock'} from type 'spinlock_t' {aka 'struct spinlock'}
41 | (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
| ^
include/linux/spinlock.h:343:20: note: in expansion of macro '__SPIN_LOCK_UNLOCKED'
343 | *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
| ^~~~~~~~~~~~~~~~~~~~
mm/slub.c:4008:9: note: in expansion of macro 'spin_lock_init'
4008 | spin_lock_init(&n->list_lock);
| ^~~~~~~~~~~~~~
mm/slub.c: In function 'free_partial':
mm/slub.c:4393:23: error: passing argument 1 of 'spin_lock_irq' from incompatible pointer type [-Werror=incompatible-pointer-types]
4393 | spin_lock_irq(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:373:55: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
373 | static __always_inline void spin_lock_irq(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:4403:25: error: passing argument 1 of 'spin_unlock_irq' from incompatible pointer type [-Werror=incompatible-pointer-types]
4403 | spin_unlock_irq(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:398:57: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
398 | static __always_inline void spin_unlock_irq(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function '__kmem_cache_do_shrink':
mm/slub.c:4607:35: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4607 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:4607:17: note: in expansion of macro 'spin_lock_irqsave'
4607 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:4639:40: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
4639 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'validate_slab_node':
mm/slub.c:4962:27: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4962 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:4962:9: note: in expansion of macro 'spin_lock_irqsave'
4962 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:4988:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
4988 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
vim +41 include/linux/spinlock_types.h
c2f21ce2e31286 Thomas Gleixner 2009-12-02 30
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 31 #define ___SPIN_LOCK_INITIALIZER(lockname) \
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 32 { \
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 33 .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 34 SPIN_DEBUG_INIT(lockname) \
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 35 SPIN_DEP_MAP_INIT(lockname) }
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 36
c2f21ce2e31286 Thomas Gleixner 2009-12-02 37 #define __SPIN_LOCK_INITIALIZER(lockname) \
de8f5e4f2dc1f0 Peter Zijlstra 2020-03-21 38 { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }
c2f21ce2e31286 Thomas Gleixner 2009-12-02 39
c2f21ce2e31286 Thomas Gleixner 2009-12-02 40 #define __SPIN_LOCK_UNLOCKED(lockname) \
c2f21ce2e31286 Thomas Gleixner 2009-12-02 @41 (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
fb1c8f93d869b3 Ingo Molnar 2005-09-10 42
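The annotated snippet explains the spin_lock_init() failure:
__SPIN_LOCK_UNLOCKED produces a spinlock_t initializer, which cannot be
assigned to a raw_spinlock_t. Each lock type has its own init/lock API;
a minimal sketch with hypothetical lock names:

static DEFINE_SPINLOCK(demo_lock);		/* spinlock_t: sleeps on RT */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* raw_spinlock_t: always spins */

static void demo_pairings(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);		/* spin_* API only */
	spin_unlock_irqrestore(&demo_lock, flags);

	raw_spin_lock_irqsave(&demo_raw_lock, flags);	/* raw_spin_* API only */
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}

Since mm/slub.c still calls the spin_* variants on list_lock, changing
the field type alone breaks every SLUB build, which is what this report
shows.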
--
0-DAY CI Kernel Test Service
https://01.org/lkp
From: Jiri Kosina <[email protected]>
The list_lock can be taken in hardirq context when do_drain() is being
called via IPI on all cores, and therefore lockdep complains about it,
because a spinlock_t cannot be acquired from hardirq context on
PREEMPT_RT, where it is a sleeping lock.
That's not a real issue, as SLAB can't be built on PREEMPT_RT anyway, but
we still want to get rid of the warning on non-PREEMPT_RT builds.
Annotate it therefore as a raw lock in order to get rid of the lockdep
warning below.
=============================
[ BUG: Invalid wait context ]
6.1.0-rc1-00134-ge35184f32151 #4 Not tainted
-----------------------------
swapper/3/0 is trying to lock:
ffff8bc88086dc18 (&parent->list_lock){..-.}-{3:3}, at: do_drain+0x57/0xb0
other info that might help us debug this:
context-{2:2}
no locks held by swapper/3/0.
stack backtrace:
CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.1.0-rc1-00134-ge35184f32151 #4
Hardware name: LENOVO 20K5S22R00/20K5S22R00, BIOS R0IET38W (1.16 ) 05/31/2017
Call Trace:
<IRQ>
dump_stack_lvl+0x6b/0x9d
__lock_acquire+0x1519/0x1730
? build_sched_domains+0x4bd/0x1590
? __lock_acquire+0xad2/0x1730
lock_acquire+0x294/0x340
? do_drain+0x57/0xb0
? sched_clock_tick+0x41/0x60
_raw_spin_lock+0x2c/0x40
? do_drain+0x57/0xb0
do_drain+0x57/0xb0
__flush_smp_call_function_queue+0x138/0x220
__sysvec_call_function+0x4f/0x210
sysvec_call_function+0x4b/0x90
</IRQ>
<TASK>
asm_sysvec_call_function+0x16/0x20
RIP: 0010:mwait_idle+0x5e/0x80
Code: 31 d2 65 48 8b 04 25 80 ed 01 00 48 89 d1 0f 01 c8 48 8b 00 a8 08 75 14 66 90 0f 00 2d 0b 78 46 00 31 c0 48 89 c1 fb 0f 01 c9 <eb> 06 fb 0f 1f 44 00 00 65 48 8b 04 25 80 ed 01 00 f0 80 60 02 df
RSP: 0000:ffffa90940217ee0 EFLAGS: 00000246
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff9bb9f93a
RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000001
R10: ffffa90940217ea8 R11: 0000000000000000 R12: ffffffffffffffff
R13: 0000000000000000 R14: ffff8bc88127c500 R15: 0000000000000000
? default_idle_call+0x1a/0xa0
default_idle_call+0x4b/0xa0
do_idle+0x1f1/0x2c0
? _raw_spin_unlock_irqrestore+0x56/0x70
cpu_startup_entry+0x19/0x20
start_secondary+0x122/0x150
secondary_startup_64_no_verify+0xce/0xdb
</TASK>
Signed-off-by: Jiri Kosina <[email protected]>
---
v1->v2: fix !SLAB build failures due to list_lock mismatch
mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
mm/slab.h | 2 +-
2 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 59c8e28f7b6a..d8a287900193 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
- spin_lock_init(&parent->list_lock);
+ raw_spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
}
@@ -559,9 +559,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
slab_node = slab_nid(slab);
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -684,7 +684,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
@@ -695,7 +695,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
}
}
@@ -768,9 +768,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
@@ -811,10 +811,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
*/
n = get_node(cachep, node);
if (n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
return 0;
}
@@ -893,7 +893,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
goto fail;
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
@@ -911,7 +911,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
new_alien = NULL;
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
/*
@@ -950,7 +950,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
@@ -961,7 +961,7 @@ static void cpuup_canceled(long cpu)
nc->avail = 0;
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto free_slab;
}
@@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
kfree(shared);
if (alien) {
@@ -1159,7 +1159,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
- spin_lock_init(&ptr->list_lock);
+ raw_spin_lock_init(&ptr->list_lock);
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
@@ -1330,11 +1330,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
total_slabs = n->total_slabs;
free_slabs = n->free_slabs;
free_objs = n->free_objects;
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
@@ -2096,7 +2096,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}
@@ -2104,7 +2104,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, node)->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}
@@ -2144,9 +2144,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail = 0;
slabs_destroy(cachep, &list);
}
@@ -2164,9 +2164,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, n->shared, node, true, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -2188,10 +2188,10 @@ static int drain_freelist(struct kmem_cache *cache,
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
p = n->slabs_free.prev;
if (p == &n->slabs_free) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto out;
}
@@ -2204,7 +2204,7 @@ static int drain_freelist(struct kmem_cache *cache,
* to the cache.
*/
n->free_objects -= cache->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slab_destroy(cache, slab);
nr_freed++;
}
@@ -2629,7 +2629,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
INIT_LIST_HEAD(&slab->slab_list);
n = get_node(cachep, slab_nid(slab));
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!slab->active) {
list_add_tail(&slab->slab_list, &n->slabs_free);
@@ -2639,7 +2639,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - slab->active;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
}
@@ -2805,7 +2805,7 @@ static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
struct slab *slab;
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
slab_list);
if (!slab) {
@@ -2832,10 +2832,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, true);
if (!slab) {
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return NULL;
}
@@ -2844,7 +2844,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
@@ -2903,7 +2903,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
@@ -2927,7 +2927,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
must_grow:
n->free_objects -= ac->avail;
alloc_done:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
direct_grow:
@@ -3147,7 +3147,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
BUG_ON(!n);
check_irq_off();
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, false);
if (!slab)
goto must_grow;
@@ -3165,12 +3165,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
must_grow:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (slab) {
/* This slab isn't counted yet so don't update free_objects */
@@ -3325,7 +3325,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
check_irq_off();
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
@@ -3354,7 +3354,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
STATS_SET_FREEABLE(cachep, i);
}
#endif
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
slabs_destroy(cachep, &list);
@@ -3721,9 +3721,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
free_percpu(prev);
@@ -3815,9 +3815,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
return;
}
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, ac, node, false, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -3901,7 +3901,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
@@ -3910,7 +3910,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
if (n->shared)
shared_avail += n->shared->avail;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
diff --git a/mm/slab.h b/mm/slab.h
index 0202a8c2f0d2..7e0bdd7773f0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -750,7 +750,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
* The slab lists for all objects.
*/
struct kmem_cache_node {
- spinlock_t list_lock;
+ raw_spinlock_t list_lock;
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
--
2.35.3
From: Jiri Kosina <[email protected]>
The list_lock can be taken in hardirq context when do_drain() is being
called via IPI on all cores, and therefore lockdep complains about it,
because a spinlock_t cannot be acquired from hardirq context on
PREEMPT_RT, where it is a sleeping lock.
That's not a real issue, as SLAB can't be built on PREEMPT_RT anyway, but
we still want to get rid of the warning on non-PREEMPT_RT builds.
Annotate it therefore as a raw lock in order to get rid of the lockdep
warning below.
=============================
[ BUG: Invalid wait context ]
6.1.0-rc1-00134-ge35184f32151 #4 Not tainted
-----------------------------
swapper/3/0 is trying to lock:
ffff8bc88086dc18 (&parent->list_lock){..-.}-{3:3}, at: do_drain+0x57/0xb0
other info that might help us debug this:
context-{2:2}
no locks held by swapper/3/0.
stack backtrace:
CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.1.0-rc1-00134-ge35184f32151 #4
Hardware name: LENOVO 20K5S22R00/20K5S22R00, BIOS R0IET38W (1.16 ) 05/31/2017
Call Trace:
<IRQ>
dump_stack_lvl+0x6b/0x9d
__lock_acquire+0x1519/0x1730
? build_sched_domains+0x4bd/0x1590
? __lock_acquire+0xad2/0x1730
lock_acquire+0x294/0x340
? do_drain+0x57/0xb0
? sched_clock_tick+0x41/0x60
_raw_spin_lock+0x2c/0x40
? do_drain+0x57/0xb0
do_drain+0x57/0xb0
__flush_smp_call_function_queue+0x138/0x220
__sysvec_call_function+0x4f/0x210
sysvec_call_function+0x4b/0x90
</IRQ>
<TASK>
asm_sysvec_call_function+0x16/0x20
RIP: 0010:mwait_idle+0x5e/0x80
Code: 31 d2 65 48 8b 04 25 80 ed 01 00 48 89 d1 0f 01 c8 48 8b 00 a8 08 75 14 66 90 0f 00 2d 0b 78 46 00 31 c0 48 89 c1 fb 0f 01 c9 <eb> 06 fb 0f 1f 44 00 00 65 48 8b 04 25 80 ed 01 00 f0 80 60 02 df
RSP: 0000:ffffa90940217ee0 EFLAGS: 00000246
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff9bb9f93a
RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000001
R10: ffffa90940217ea8 R11: 0000000000000000 R12: ffffffffffffffff
R13: 0000000000000000 R14: ffff8bc88127c500 R15: 0000000000000000
? default_idle_call+0x1a/0xa0
default_idle_call+0x4b/0xa0
do_idle+0x1f1/0x2c0
? _raw_spin_unlock_irqrestore+0x56/0x70
cpu_startup_entry+0x19/0x20
start_secondary+0x122/0x150
secondary_startup_64_no_verify+0xce/0xdb
</TASK>
Signed-off-by: Jiri Kosina <[email protected]>
---
v1->v2: fix !SLAB build failures due to list_lock mismatch
v2->v3: really fix it by sending refreshed version of the patch (facepalm)
mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
mm/slab.h | 4 +++
2 files changed, 49 insertions(+), 45 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 59c8e28f7b6a..d8a287900193 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
- spin_lock_init(&parent->list_lock);
+ raw_spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
}
@@ -559,9 +559,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
slab_node = slab_nid(slab);
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -684,7 +684,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
@@ -695,7 +695,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
}
}
@@ -768,9 +768,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, slab_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, slab_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
@@ -811,10 +811,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
*/
n = get_node(cachep, node);
if (n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
return 0;
}
@@ -893,7 +893,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
goto fail;
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
@@ -911,7 +911,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
new_alien = NULL;
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
/*
@@ -950,7 +950,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
@@ -961,7 +961,7 @@ static void cpuup_canceled(long cpu)
nc->avail = 0;
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto free_slab;
}
@@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
kfree(shared);
if (alien) {
@@ -1159,7 +1159,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
- spin_lock_init(&ptr->list_lock);
+ raw_spin_lock_init(&ptr->list_lock);
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
@@ -1330,11 +1330,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
total_slabs = n->total_slabs;
free_slabs = n->free_slabs;
free_objs = n->free_objects;
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
@@ -2096,7 +2096,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}
@@ -2104,7 +2104,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, node)->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}
@@ -2144,9 +2144,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail = 0;
slabs_destroy(cachep, &list);
}
@@ -2164,9 +2164,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, n->shared, node, true, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -2188,10 +2188,10 @@ static int drain_freelist(struct kmem_cache *cache,
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
p = n->slabs_free.prev;
if (p == &n->slabs_free) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto out;
}
@@ -2204,7 +2204,7 @@ static int drain_freelist(struct kmem_cache *cache,
* to the cache.
*/
n->free_objects -= cache->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slab_destroy(cache, slab);
nr_freed++;
}
@@ -2629,7 +2629,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
INIT_LIST_HEAD(&slab->slab_list);
n = get_node(cachep, slab_nid(slab));
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!slab->active) {
list_add_tail(&slab->slab_list, &n->slabs_free);
@@ -2639,7 +2639,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - slab->active;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
}
@@ -2805,7 +2805,7 @@ static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
struct slab *slab;
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
slab_list);
if (!slab) {
@@ -2832,10 +2832,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, true);
if (!slab) {
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return NULL;
}
@@ -2844,7 +2844,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
@@ -2903,7 +2903,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
@@ -2927,7 +2927,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
must_grow:
n->free_objects -= ac->avail;
alloc_done:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
direct_grow:
@@ -3147,7 +3147,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
BUG_ON(!n);
check_irq_off();
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
slab = get_first_slab(n, false);
if (!slab)
goto must_grow;
@@ -3165,12 +3165,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
fixup_slab_list(cachep, n, slab, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
must_grow:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (slab) {
/* This slab isn't counted yet so don't update free_objects */
@@ -3325,7 +3325,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
check_irq_off();
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
@@ -3354,7 +3354,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
STATS_SET_FREEABLE(cachep, i);
}
#endif
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
slabs_destroy(cachep, &list);
@@ -3721,9 +3721,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
free_percpu(prev);
@@ -3815,9 +3815,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
return;
}
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, ac, node, false, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
@@ -3901,7 +3901,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
@@ -3910,7 +3910,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
if (n->shared)
shared_avail += n->shared->avail;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
diff --git a/mm/slab.h b/mm/slab.h
index 0202a8c2f0d2..7a705e4228c8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -750,7 +750,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
* The slab lists for all objects.
*/
struct kmem_cache_node {
+#ifdef CONFIG_SLAB
+ raw_spinlock_t list_lock;
+#else
spinlock_t list_lock;
+#endif
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
--
2.35.3
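Condensed view of what the v3 mm/slab.h hunk leaves behind (sketch,
unrelated fields elided):

struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;	/* taken from hardirq (IPI) context */
#else
	spinlock_t list_lock;		/* SLUB keeps spin_* callers working
					 * and stays preemptible on RT */
#endif
	/* ... */
};

The raw annotation is thereby confined to SLAB, so the unmodified
spin_lock_irqsave()/spin_unlock_irqrestore() calls in mm/slub.c continue
to type-check.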
Hi Jiri,
I love your patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/nycvar.YFH.7.76.2210211643330.29912%40cbobk.fhfr.pm
patch subject: [PATCH] mm/slab: Annotate kmem_cache_node->list_lock as raw
config: riscv-randconfig-r023-20221019
compiler: clang version 16.0.0 (https://github.com/llvm/llvm-project 791a7ae1ba3efd6bca96338e10ffde557ba83920)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install riscv cross compiling tool for clang build
# apt-get install binutils-riscv64-linux-gnu
# https://github.com/intel-lab-lkp/linux/commit/709b8ca6934319e9e8e0519baa3ac2bb4f634451
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
git checkout 709b8ca6934319e9e8e0519baa3ac2bb4f634451
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=riscv SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:751:2: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
insw(addr, buffer, count);
^~~~~~~~~~~~~~~~~~~~~~~~~
arch/riscv/include/asm/io.h:105:53: note: expanded from macro 'insw'
#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
~~~~~~~~~~ ^
In file included from mm/slub.c:14:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:13:
In file included from include/linux/cgroup.h:26:
In file included from include/linux/kernel_stat.h:9:
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:759:2: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
insl(addr, buffer, count);
^~~~~~~~~~~~~~~~~~~~~~~~~
arch/riscv/include/asm/io.h:106:53: note: expanded from macro 'insl'
#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
~~~~~~~~~~ ^
In file included from mm/slub.c:14:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:13:
In file included from include/linux/cgroup.h:26:
In file included from include/linux/kernel_stat.h:9:
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:768:2: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
outsb(addr, buffer, count);
^~~~~~~~~~~~~~~~~~~~~~~~~~
arch/riscv/include/asm/io.h:118:55: note: expanded from macro 'outsb'
#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
~~~~~~~~~~ ^
In file included from mm/slub.c:14:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:13:
In file included from include/linux/cgroup.h:26:
In file included from include/linux/kernel_stat.h:9:
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:777:2: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
outsw(addr, buffer, count);
^~~~~~~~~~~~~~~~~~~~~~~~~~
arch/riscv/include/asm/io.h:119:55: note: expanded from macro 'outsw'
#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
~~~~~~~~~~ ^
In file included from mm/slub.c:14:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:13:
In file included from include/linux/cgroup.h:26:
In file included from include/linux/kernel_stat.h:9:
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:786:2: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
outsl(addr, buffer, count);
^~~~~~~~~~~~~~~~~~~~~~~~~~
arch/riscv/include/asm/io.h:120:55: note: expanded from macro 'outsl'
#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
~~~~~~~~~~ ^
In file included from mm/slub.c:14:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:13:
In file included from include/linux/cgroup.h:26:
In file included from include/linux/kernel_stat.h:9:
In file included from include/linux/interrupt.h:11:
In file included from include/linux/hardirq.h:11:
In file included from ./arch/riscv/include/generated/asm/hardirq.h:1:
In file included from include/asm-generic/hardirq.h:17:
In file included from include/linux/irq.h:20:
In file included from include/linux/io.h:13:
In file included from arch/riscv/include/asm/io.h:136:
include/asm-generic/io.h:1134:55: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
~~~~~~~~~~ ^
>> mm/slub.c:2124:20: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:243:34: note: expanded from macro 'raw_spin_lock_irqsave'
flags = _raw_spin_lock_irqsave(lock); \
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2132:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2210:20: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:243:34: note: expanded from macro 'raw_spin_lock_irqsave'
flags = _raw_spin_lock_irqsave(lock); \
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2247:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2199:15: warning: variable 'partial_slabs' set but not used [-Wunused-but-set-variable]
unsigned int partial_slabs = 0;
^
mm/slub.c:2489:21: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:243:34: note: expanded from macro 'raw_spin_lock_irqsave'
flags = _raw_spin_lock_irqsave(lock); \
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2497:21: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:243:34: note: expanded from macro 'raw_spin_lock_irqsave'
flags = _raw_spin_lock_irqsave(lock); \
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2508:27: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2515:26: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2523:26: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2847:20: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:243:34: note: expanded from macro 'raw_spin_lock_irqsave'
flags = _raw_spin_lock_irqsave(lock); \
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2926:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2946:20: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
vim +2124 mm/slub.c
c7323a5ad07863 Vlastimil Babka 2022-08-23 2097
c7323a5ad07863 Vlastimil Babka 2022-08-23 2098 /*
c7323a5ad07863 Vlastimil Babka 2022-08-23 2099 * Called only for kmem_cache_debug() caches to allocate from a freshly
c7323a5ad07863 Vlastimil Babka 2022-08-23 2100 * allocated slab. Allocate a single object instead of whole freelist
c7323a5ad07863 Vlastimil Babka 2022-08-23 2101 * and put the slab to the partial (or full) list.
c7323a5ad07863 Vlastimil Babka 2022-08-23 2102 */
c7323a5ad07863 Vlastimil Babka 2022-08-23 2103 static void *alloc_single_from_new_slab(struct kmem_cache *s,
6edf2576a6cc46 Feng Tang 2022-09-13 2104 struct slab *slab, int orig_size)
c7323a5ad07863 Vlastimil Babka 2022-08-23 2105 {
c7323a5ad07863 Vlastimil Babka 2022-08-23 2106 int nid = slab_nid(slab);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2107 struct kmem_cache_node *n = get_node(s, nid);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2108 unsigned long flags;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2109 void *object;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2110
c7323a5ad07863 Vlastimil Babka 2022-08-23 2111
c7323a5ad07863 Vlastimil Babka 2022-08-23 2112 object = slab->freelist;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2113 slab->freelist = get_freepointer(s, object);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2114 slab->inuse = 1;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2115
6edf2576a6cc46 Feng Tang 2022-09-13 2116 if (!alloc_debug_processing(s, slab, object, orig_size))
c7323a5ad07863 Vlastimil Babka 2022-08-23 2117 /*
c7323a5ad07863 Vlastimil Babka 2022-08-23 2118 * It's not really expected that this would fail on a
c7323a5ad07863 Vlastimil Babka 2022-08-23 2119 * freshly allocated slab, but a concurrent memory
c7323a5ad07863 Vlastimil Babka 2022-08-23 2120 * corruption in theory could cause that.
c7323a5ad07863 Vlastimil Babka 2022-08-23 2121 */
c7323a5ad07863 Vlastimil Babka 2022-08-23 2122 return NULL;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2123
c7323a5ad07863 Vlastimil Babka 2022-08-23 @2124 spin_lock_irqsave(&n->list_lock, flags);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2125
c7323a5ad07863 Vlastimil Babka 2022-08-23 2126 if (slab->inuse == slab->objects)
c7323a5ad07863 Vlastimil Babka 2022-08-23 2127 add_full(s, n, slab);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2128 else
c7323a5ad07863 Vlastimil Babka 2022-08-23 2129 add_partial(n, slab, DEACTIVATE_TO_HEAD);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2130
c7323a5ad07863 Vlastimil Babka 2022-08-23 2131 inc_slabs_node(s, nid, slab->objects);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2132 spin_unlock_irqrestore(&n->list_lock, flags);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2133
c7323a5ad07863 Vlastimil Babka 2022-08-23 2134 return object;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2135 }
c7323a5ad07863 Vlastimil Babka 2022-08-23 2136
--
0-DAY CI Kernel Test Service
https://01.org/lkp
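The mm/slub.c errors above all share one root cause: mm/slab.h declares
struct kmem_cache_node once for both allocators, so turning list_lock
into a raw_spinlock_t unconditionally leaves every spin_lock_*() call
site in mm/slub.c handing a raw_spinlock_t * to an API that expects a
spinlock_t *. A minimal userspace sketch of the same error class -- it
assumes nothing from the kernel tree, uses two stand-in structs for the
two kernel lock types, and fails to compile on purpose:

/* Stand-ins for the two distinct kernel lock types. */
typedef struct raw_spinlock { int dummy; } raw_spinlock_t;
typedef struct spinlock { raw_spinlock_t rlock; } spinlock_t;

/* Stand-in for the spinlock_t-only API entry point. */
static raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

struct kmem_cache_node {
	raw_spinlock_t list_lock;	/* the type the patch switched to */
};

int main(void)
{
	struct kmem_cache_node n = { { 0 } };

	/* mm/slub.c still hands the (now raw) lock to the spinlock_t API: */
	spinlock_check(&n.list_lock);	/* -Werror=incompatible-pointer-types */
	return 0;
}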
Hi Jiri,
I love your patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/nycvar.YFH.7.76.2210211643330.29912%40cbobk.fhfr.pm
patch subject: [PATCH] mm/slab: Annotate kmem_cache_node->list_lock as raw
config: mips-bcm63xx_defconfig
compiler: clang version 16.0.0 (https://github.com/llvm/llvm-project 791a7ae1ba3efd6bca96338e10ffde557ba83920)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install mips cross compiling tool for clang build
# apt-get install binutils-mips-linux-gnu
# https://github.com/intel-lab-lkp/linux/commit/709b8ca6934319e9e8e0519baa3ac2bb4f634451
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
git checkout 709b8ca6934319e9e8e0519baa3ac2bb4f634451
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=mips SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
^
mm/slub.c:2508:27: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2515:26: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2523:26: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:2946:20: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:265:26: note: expanded from macro 'raw_spin_lock_irqsave'
_raw_spin_lock_irqsave(lock, flags); \
^~~~
include/linux/spinlock_api_up.h:69:60: note: expanded from macro '_raw_spin_lock_irqsave'
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
^~~~
include/linux/spinlock_api_up.h:40:38: note: expanded from macro '__LOCK_IRQSAVE'
do { local_irq_save(flags); __LOCK(lock); } while (0)
^~~~
include/linux/spinlock_api_up.h:31:35: note: expanded from macro '__LOCK'
do { preempt_disable(); ___LOCK(lock); } while (0)
^~~~
include/linux/spinlock_api_up.h:28:32: note: expanded from macro '___LOCK'
do { __acquire(lock); (void)(lock); } while (0)
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:2949:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:3483:27: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:3515:23: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irqsave(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:380:39: note: expanded from macro 'spin_lock_irqsave'
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
^~~~
include/linux/spinlock.h:265:26: note: expanded from macro 'raw_spin_lock_irqsave'
_raw_spin_lock_irqsave(lock, flags); \
^~~~
include/linux/spinlock_api_up.h:69:60: note: expanded from macro '_raw_spin_lock_irqsave'
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
^~~~
include/linux/spinlock_api_up.h:40:38: note: expanded from macro '__LOCK_IRQSAVE'
do { local_irq_save(flags); __LOCK(lock); } while (0)
^~~~
include/linux/spinlock_api_up.h:31:35: note: expanded from macro '__LOCK'
do { preempt_disable(); ___LOCK(lock); } while (0)
^~~~
include/linux/spinlock_api_up.h:28:32: note: expanded from macro '___LOCK'
do { __acquire(lock); (void)(lock); } while (0)
^~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
mm/slub.c:3557:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:3572:25: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irqrestore(&n->list_lock, flags);
^~~~~~~~~~~~~
include/linux/spinlock.h:403:64: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
^
mm/slub.c:4008:17: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_init(&n->list_lock);
^~~~~~~~~~~~~
include/linux/spinlock.h:342:17: note: expanded from macro 'spin_lock_init'
spinlock_check(_lock); \
^~~~~
include/linux/spinlock.h:323:67: note: passing argument to parameter 'lock' here
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
^
>> mm/slub.c:4008:2: error: assigning to 'raw_spinlock_t' (aka 'struct raw_spinlock') from incompatible type 'spinlock_t' (aka 'struct spinlock')
spin_lock_init(&n->list_lock);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/spinlock.h:343:11: note: expanded from macro 'spin_lock_init'
*(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
mm/slub.c:4393:16: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_lock_irq(&n->list_lock);
^~~~~~~~~~~~~
include/linux/spinlock.h:373:55: note: passing argument to parameter 'lock' here
static __always_inline void spin_lock_irq(spinlock_t *lock)
^
mm/slub.c:4403:18: error: incompatible pointer types passing 'raw_spinlock_t *' (aka 'struct raw_spinlock *') to parameter of type 'spinlock_t *' (aka 'struct spinlock *') [-Werror,-Wincompatible-pointer-types]
spin_unlock_irq(&n->list_lock);
^~~~~~~~~~~~~
include/linux/spinlock.h:398:57: note: passing argument to parameter 'lock' here
static __always_inline void spin_unlock_irq(spinlock_t *lock)
^
fatal error: too many errors emitted, stopping now [-ferror-limit=]
1 warning and 20 errors generated.
vim +4008 mm/slub.c
81819f0fc8285a Christoph Lameter 2007-05-06 4003
5595cffc8248e4 Pekka Enberg 2008-08-05 4004 static void
4053497d6a3771 Joonsoo Kim 2012-05-11 4005 init_kmem_cache_node(struct kmem_cache_node *n)
81819f0fc8285a Christoph Lameter 2007-05-06 4006 {
81819f0fc8285a Christoph Lameter 2007-05-06 4007 n->nr_partial = 0;
81819f0fc8285a Christoph Lameter 2007-05-06 @4008 spin_lock_init(&n->list_lock);
81819f0fc8285a Christoph Lameter 2007-05-06 4009 INIT_LIST_HEAD(&n->partial);
8ab1372fac5684 Christoph Lameter 2007-07-17 4010 #ifdef CONFIG_SLUB_DEBUG
0f389ec6307752 Christoph Lameter 2008-04-14 4011 atomic_long_set(&n->nr_slabs, 0);
02b71b70129aaa Salman Qazi 2008-09-11 4012 atomic_long_set(&n->total_objects, 0);
643b113849d8fa Christoph Lameter 2007-05-06 4013 INIT_LIST_HEAD(&n->full);
8ab1372fac5684 Christoph Lameter 2007-07-17 4014 #endif
81819f0fc8285a Christoph Lameter 2007-05-06 4015 }
81819f0fc8285a Christoph Lameter 2007-05-06 4016
--
0-DAY CI Kernel Test Service
https://01.org/lkp
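The bcm63xx config in this report is a uniprocessor build, which is why
the expansion in the log runs through include/linux/spinlock_api_up.h,
where ___LOCK() reduces the lock operation to "(void)(lock)". The type
mismatch is still caught because spin_lock_irqsave() routes its argument
through spinlock_check() before any per-config expansion, as the quoted
macro shows. Condensed from include/linux/spinlock.h (the signature is
the one in the log; the body is mainline's):

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;	/* only a real spinlock_t has .rlock */
}

So even a configuration that compiles locking away to nothing rejects a
raw_spinlock_t at these call sites.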
On Fri, Oct 21, 2022 at 09:18:12PM +0200, Jiri Kosina wrote:
> From: Jiri Kosina <[email protected]>
>
> The list_lock can be taken in hardirq context when do_drain() is being
> called via IPI on all cores, and therefore lockdep complains about it,
> because it can't be preempted on PREEMPT_RT.
>
> That's not a real issue, as SLAB can't be built on PREEMPT_RT anyway, but
> we still want to get rid of the warning on non-PREEMPT_RT builds.
>
> Annotate it therefore as a raw lock in order to get rid of the lockdep
> warning below.
>
> =============================
> [ BUG: Invalid wait context ]
> 6.1.0-rc1-00134-ge35184f32151 #4 Not tainted
> -----------------------------
> swapper/3/0 is trying to lock:
> ffff8bc88086dc18 (&parent->list_lock){..-.}-{3:3}, at: do_drain+0x57/0xb0
> other info that might help us debug this:
> context-{2:2}
> no locks held by swapper/3/0.
> stack backtrace:
> CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.1.0-rc1-00134-ge35184f32151 #4
> Hardware name: LENOVO 20K5S22R00/20K5S22R00, BIOS R0IET38W (1.16 ) 05/31/2017
> Call Trace:
> <IRQ>
> dump_stack_lvl+0x6b/0x9d
> __lock_acquire+0x1519/0x1730
> ? build_sched_domains+0x4bd/0x1590
> ? __lock_acquire+0xad2/0x1730
> lock_acquire+0x294/0x340
> ? do_drain+0x57/0xb0
> ? sched_clock_tick+0x41/0x60
> _raw_spin_lock+0x2c/0x40
> ? do_drain+0x57/0xb0
> do_drain+0x57/0xb0
> __flush_smp_call_function_queue+0x138/0x220
> __sysvec_call_function+0x4f/0x210
> sysvec_call_function+0x4b/0x90
> </IRQ>
> <TASK>
> asm_sysvec_call_function+0x16/0x20
> RIP: 0010:mwait_idle+0x5e/0x80
> Code: 31 d2 65 48 8b 04 25 80 ed 01 00 48 89 d1 0f 01 c8 48 8b 00 a8 08 75 14 66 90 0f 00 2d 0b 78 46 00 31 c0 48 89 c1 fb 0f 01 c9 <eb> 06 fb 0f 1f 44 00 00 65 48 8b 04 25 80 ed 01 00 f0 80 60 02 df
> RSP: 0000:ffffa90940217ee0 EFLAGS: 00000246
> RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff9bb9f93a
> RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000001
> R10: ffffa90940217ea8 R11: 0000000000000000 R12: ffffffffffffffff
> R13: 0000000000000000 R14: ffff8bc88127c500 R15: 0000000000000000
> ? default_idle_call+0x1a/0xa0
> default_idle_call+0x4b/0xa0
> do_idle+0x1f1/0x2c0
> ? _raw_spin_unlock_irqrestore+0x56/0x70
> cpu_startup_entry+0x19/0x20
> start_secondary+0x122/0x150
> secondary_startup_64_no_verify+0xce/0xdb
> </TASK>
>
Looks good to me.
Reviewed-by: Hyeonggon Yoo <[email protected]>
> Signed-off-by: Jiri Kosina <[email protected]>
> ---
>
> v1->v2: fix !SLAB build failures due to list_lock mismatch
> v2->v3: really fix it by sending a refreshed version of the patch (facepalm)
>
> mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
> mm/slab.h | 4 +++
> 2 files changed, 49 insertions(+), 45 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index 59c8e28f7b6a..d8a287900193 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
> parent->shared = NULL;
> parent->alien = NULL;
> parent->colour_next = 0;
> - spin_lock_init(&parent->list_lock);
> + raw_spin_lock_init(&parent->list_lock);
> parent->free_objects = 0;
> parent->free_touched = 0;
> }
> @@ -559,9 +559,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
> slab_node = slab_nid(slab);
> n = get_node(cachep, slab_node);
>
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> free_block(cachep, &objp, 1, slab_node, &list);
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
>
> slabs_destroy(cachep, &list);
> }
> @@ -684,7 +684,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
> struct kmem_cache_node *n = get_node(cachep, node);
>
> if (ac->avail) {
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> /*
> * Stuff objects into the remote nodes shared array first.
> * That way we could avoid the overhead of putting the objects
> @@ -695,7 +695,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
>
> free_block(cachep, ac->entry, ac->avail, node, list);
> ac->avail = 0;
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> }
> }
>
> @@ -768,9 +768,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
> slabs_destroy(cachep, &list);
> } else {
> n = get_node(cachep, slab_node);
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> free_block(cachep, &objp, 1, slab_node, &list);
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> slabs_destroy(cachep, &list);
> }
> return 1;
> @@ -811,10 +811,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
> */
> n = get_node(cachep, node);
> if (n) {
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
> cachep->num;
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
>
> return 0;
> }
> @@ -893,7 +893,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
> goto fail;
>
> n = get_node(cachep, node);
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> if (n->shared && force_change) {
> free_block(cachep, n->shared->entry,
> n->shared->avail, node, &list);
> @@ -911,7 +911,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
> new_alien = NULL;
> }
>
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> slabs_destroy(cachep, &list);
>
> /*
> @@ -950,7 +950,7 @@ static void cpuup_canceled(long cpu)
> if (!n)
> continue;
>
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
>
> /* Free limit for this kmem_cache_node */
> n->free_limit -= cachep->batchcount;
> @@ -961,7 +961,7 @@ static void cpuup_canceled(long cpu)
> nc->avail = 0;
>
> if (!cpumask_empty(mask)) {
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> goto free_slab;
> }
>
> @@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
> alien = n->alien;
> n->alien = NULL;
>
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
>
> kfree(shared);
> if (alien) {
> @@ -1159,7 +1159,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
> /*
> * Do not assume that spinlocks can be initialized via memcpy:
> */
> - spin_lock_init(&ptr->list_lock);
> + raw_spin_lock_init(&ptr->list_lock);
>
> MAKE_ALL_LISTS(cachep, ptr, nodeid);
> cachep->node[nodeid] = ptr;
> @@ -1330,11 +1330,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
> for_each_kmem_cache_node(cachep, node, n) {
> unsigned long total_slabs, free_slabs, free_objs;
>
> - spin_lock_irqsave(&n->list_lock, flags);
> + raw_spin_lock_irqsave(&n->list_lock, flags);
> total_slabs = n->total_slabs;
> free_slabs = n->free_slabs;
> free_objs = n->free_objects;
> - spin_unlock_irqrestore(&n->list_lock, flags);
> + raw_spin_unlock_irqrestore(&n->list_lock, flags);
>
> pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
> node, total_slabs - free_slabs, total_slabs,
> @@ -2096,7 +2096,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
> {
> #ifdef CONFIG_SMP
> check_irq_off();
> - assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
> + assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
> #endif
> }
>
> @@ -2104,7 +2104,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
> {
> #ifdef CONFIG_SMP
> check_irq_off();
> - assert_spin_locked(&get_node(cachep, node)->list_lock);
> + assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
> #endif
> }
>
> @@ -2144,9 +2144,9 @@ static void do_drain(void *arg)
> check_irq_off();
> ac = cpu_cache_get(cachep);
> n = get_node(cachep, node);
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> free_block(cachep, ac->entry, ac->avail, node, &list);
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> ac->avail = 0;
> slabs_destroy(cachep, &list);
> }
> @@ -2164,9 +2164,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
> drain_alien_cache(cachep, n->alien);
>
> for_each_kmem_cache_node(cachep, node, n) {
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> drain_array_locked(cachep, n->shared, node, true, &list);
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
>
> slabs_destroy(cachep, &list);
> }
> @@ -2188,10 +2188,10 @@ static int drain_freelist(struct kmem_cache *cache,
> nr_freed = 0;
> while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
>
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> p = n->slabs_free.prev;
> if (p == &n->slabs_free) {
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> goto out;
> }
>
> @@ -2204,7 +2204,7 @@ static int drain_freelist(struct kmem_cache *cache,
> * to the cache.
> */
> n->free_objects -= cache->num;
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> slab_destroy(cache, slab);
> nr_freed++;
> }
> @@ -2629,7 +2629,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
> INIT_LIST_HEAD(&slab->slab_list);
> n = get_node(cachep, slab_nid(slab));
>
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> n->total_slabs++;
> if (!slab->active) {
> list_add_tail(&slab->slab_list, &n->slabs_free);
> @@ -2639,7 +2639,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
>
> STATS_INC_GROWN(cachep);
> n->free_objects += cachep->num - slab->active;
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
>
> fixup_objfreelist_debug(cachep, &list);
> }
> @@ -2805,7 +2805,7 @@ static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
> {
> struct slab *slab;
>
> - assert_spin_locked(&n->list_lock);
> + assert_raw_spin_locked(&n->list_lock);
> slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
> slab_list);
> if (!slab) {
> @@ -2832,10 +2832,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
> if (!gfp_pfmemalloc_allowed(flags))
> return NULL;
>
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> slab = get_first_slab(n, true);
> if (!slab) {
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> return NULL;
> }
>
> @@ -2844,7 +2844,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
>
> fixup_slab_list(cachep, n, slab, &list);
>
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> fixup_objfreelist_debug(cachep, &list);
>
> return obj;
> @@ -2903,7 +2903,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
> if (!n->free_objects && (!shared || !shared->avail))
> goto direct_grow;
>
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> shared = READ_ONCE(n->shared);
>
> /* See if we can refill from the shared array */
> @@ -2927,7 +2927,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
> must_grow:
> n->free_objects -= ac->avail;
> alloc_done:
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> fixup_objfreelist_debug(cachep, &list);
>
> direct_grow:
> @@ -3147,7 +3147,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> BUG_ON(!n);
>
> check_irq_off();
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> slab = get_first_slab(n, false);
> if (!slab)
> goto must_grow;
> @@ -3165,12 +3165,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
>
> fixup_slab_list(cachep, n, slab, &list);
>
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> fixup_objfreelist_debug(cachep, &list);
> return obj;
>
> must_grow:
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
> if (slab) {
> /* This slab isn't counted yet so don't update free_objects */
> @@ -3325,7 +3325,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
>
> check_irq_off();
> n = get_node(cachep, node);
> - spin_lock(&n->list_lock);
> + raw_spin_lock(&n->list_lock);
> if (n->shared) {
> struct array_cache *shared_array = n->shared;
> int max = shared_array->limit - shared_array->avail;
> @@ -3354,7 +3354,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> STATS_SET_FREEABLE(cachep, i);
> }
> #endif
> - spin_unlock(&n->list_lock);
> + raw_spin_unlock(&n->list_lock);
> ac->avail -= batchcount;
> memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
> slabs_destroy(cachep, &list);
> @@ -3721,9 +3721,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
>
> node = cpu_to_mem(cpu);
> n = get_node(cachep, node);
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> free_block(cachep, ac->entry, ac->avail, node, &list);
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> slabs_destroy(cachep, &list);
> }
> free_percpu(prev);
> @@ -3815,9 +3815,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
> return;
> }
>
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
> drain_array_locked(cachep, ac, node, false, &list);
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
>
> slabs_destroy(cachep, &list);
> }
> @@ -3901,7 +3901,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
>
> for_each_kmem_cache_node(cachep, node, n) {
> check_irq_on();
> - spin_lock_irq(&n->list_lock);
> + raw_spin_lock_irq(&n->list_lock);
>
> total_slabs += n->total_slabs;
> free_slabs += n->free_slabs;
> @@ -3910,7 +3910,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
> if (n->shared)
> shared_avail += n->shared->avail;
>
> - spin_unlock_irq(&n->list_lock);
> + raw_spin_unlock_irq(&n->list_lock);
> }
> num_objs = total_slabs * cachep->num;
> active_slabs = total_slabs - free_slabs;
> diff --git a/mm/slab.h b/mm/slab.h
> index 0202a8c2f0d2..7a705e4228c8 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -750,7 +750,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
> * The slab lists for all objects.
> */
> struct kmem_cache_node {
> +#ifdef CONFIG_SLAB
> + raw_spinlock_t list_lock;
> +#else
> spinlock_t list_lock;
> +#endif
>
> #ifdef CONFIG_SLAB
> struct list_head slabs_partial; /* partial list first, better asm code */
> --
> 2.35.3
>
--
Thanks,
Hyeonggon
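The v3 patch quoted above settles this by making the lock type follow
the allocator instead of changing it for both; the robot report that
follows is against the earlier v2, which still tripped the same errors.
Condensed shape of the fix (not a drop-in hunk; the real one is in the
mm/slab.h diff above):

struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;	/* SLAB: taken from hardirq via IPI */
#else
	spinlock_t list_lock;		/* SLUB: no such hardirq path */
#endif
	/* per-allocator list heads follow */
};

With that split, mm/slab.c converts wholesale to the raw_spin_*()
helpers while mm/slub.c keeps its existing spin_*() call sites.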
Hi Jiri,
I love your patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on v6.1-rc1 next-20221021]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/UPDATE-20221022-031645/Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/nycvar.YFH.7.76.2210212114480.29912%40cbobk.fhfr.pm
patch subject: [PATCH v2] mm/slab: Annotate kmem_cache_node->list_lock as raw
config: x86_64-rhel-8.3-func
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/87acd941576d0867488b675c1fe0d35ffd4d4541
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review UPDATE-20221022-031645/Jiri-Kosina/mm-slab-Annotate-kmem_cache_node-list_lock-as-raw/20221021-225037
git checkout 87acd941576d0867488b675c1fe0d35ffd4d4541
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'alloc_single_from_new_slab':
>> mm/slub.c:2124:27: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
2124 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2124:9: note: in expansion of macro 'spin_lock_irqsave'
2124 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
>> mm/slub.c:2132:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
2132 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'get_partial_node':
mm/slub.c:2210:27: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
2210 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2210:9: note: in expansion of macro 'spin_lock_irqsave'
2210 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:2247:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
2247 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'deactivate_slab':
mm/slub.c:2489:35: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
2489 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2489:17: note: in expansion of macro 'spin_lock_irqsave'
2489 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c:2497:35: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
2497 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2497:17: note: in expansion of macro 'spin_lock_irqsave'
2497 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:2508:48: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
2508 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
--
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:2946:9: note: in expansion of macro 'spin_lock_irqsave'
2946 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:2949:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
2949 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c: In function '__slab_free':
mm/slub.c:3483:48: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3483 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c:3515:51: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
3515 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:3515:33: note: in expansion of macro 'spin_lock_irqsave'
3515 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:3557:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3557 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c:3572:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
3572 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
mm/slub.c: In function 'init_kmem_cache_node':
mm/slub.c:4008:24: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4008 | spin_lock_init(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:342:24: note: in definition of macro 'spin_lock_init'
342 | spinlock_check(_lock); \
| ^~~~~
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/spinlock.h:88,
from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
>> include/linux/spinlock_types.h:41:9: error: incompatible types when assigning to type 'raw_spinlock_t' {aka 'struct raw_spinlock'} from type 'spinlock_t' {aka 'struct spinlock'}
41 | (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
| ^
include/linux/spinlock.h:343:20: note: in expansion of macro '__SPIN_LOCK_UNLOCKED'
343 | *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
| ^~~~~~~~~~~~~~~~~~~~
mm/slub.c:4008:9: note: in expansion of macro 'spin_lock_init'
4008 | spin_lock_init(&n->list_lock);
| ^~~~~~~~~~~~~~
mm/slub.c: In function 'free_partial':
>> mm/slub.c:4393:23: error: passing argument 1 of 'spin_lock_irq' from incompatible pointer type [-Werror=incompatible-pointer-types]
4393 | spin_lock_irq(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:373:55: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
373 | static __always_inline void spin_lock_irq(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
>> mm/slub.c:4403:25: error: passing argument 1 of 'spin_unlock_irq' from incompatible pointer type [-Werror=incompatible-pointer-types]
4403 | spin_unlock_irq(&n->list_lock);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:398:57: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
398 | static __always_inline void spin_unlock_irq(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function '__kmem_cache_do_shrink':
mm/slub.c:4607:35: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4607 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:4607:17: note: in expansion of macro 'spin_lock_irqsave'
4607 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:4639:40: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
4639 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'validate_slab_node':
mm/slub.c:4962:27: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
4962 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:4962:9: note: in expansion of macro 'spin_lock_irqsave'
4962 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:323:67: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
323 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
| ~~~~~~~~~~~~^~~~
mm/slub.c:4988:32: error: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [-Werror=incompatible-pointer-types]
4988 | spin_unlock_irqrestore(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
include/linux/spinlock.h:403:64: note: expected 'spinlock_t *' {aka 'struct spinlock *'} but argument is of type 'raw_spinlock_t *' {aka 'struct raw_spinlock *'}
403 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
| ~~~~~~~~~~~~^~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
from mm/slub.c:13:
mm/slub.c: In function 'slab_debug_trace_open':
mm/slub.c:6202:35: error: passing argument 1 of 'spinlock_check' from incompatible pointer type [-Werror=incompatible-pointer-types]
6202 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~
| |
| raw_spinlock_t * {aka struct raw_spinlock *}
include/linux/spinlock.h:243:48: note: in definition of macro 'raw_spin_lock_irqsave'
243 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/slub.c:6202:17: note: in expansion of macro 'spin_lock_irqsave'
6202 | spin_lock_irqsave(&n->list_lock, flags);
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:7,
vim +/spin_unlock_irqrestore +2132 mm/slub.c
c7323a5ad07863 Vlastimil Babka 2022-08-23 2097
c7323a5ad07863 Vlastimil Babka 2022-08-23 2098 /*
c7323a5ad07863 Vlastimil Babka 2022-08-23 2099 * Called only for kmem_cache_debug() caches to allocate from a freshly
c7323a5ad07863 Vlastimil Babka 2022-08-23 2100 * allocated slab. Allocate a single object instead of whole freelist
c7323a5ad07863 Vlastimil Babka 2022-08-23 2101 * and put the slab to the partial (or full) list.
c7323a5ad07863 Vlastimil Babka 2022-08-23 2102 */
c7323a5ad07863 Vlastimil Babka 2022-08-23 2103 static void *alloc_single_from_new_slab(struct kmem_cache *s,
6edf2576a6cc46 Feng Tang 2022-09-13 2104 struct slab *slab, int orig_size)
c7323a5ad07863 Vlastimil Babka 2022-08-23 2105 {
c7323a5ad07863 Vlastimil Babka 2022-08-23 2106 int nid = slab_nid(slab);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2107 struct kmem_cache_node *n = get_node(s, nid);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2108 unsigned long flags;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2109 void *object;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2110
c7323a5ad07863 Vlastimil Babka 2022-08-23 2111
c7323a5ad07863 Vlastimil Babka 2022-08-23 2112 object = slab->freelist;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2113 slab->freelist = get_freepointer(s, object);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2114 slab->inuse = 1;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2115
6edf2576a6cc46 Feng Tang 2022-09-13 2116 if (!alloc_debug_processing(s, slab, object, orig_size))
c7323a5ad07863 Vlastimil Babka 2022-08-23 2117 /*
c7323a5ad07863 Vlastimil Babka 2022-08-23 2118 * It's not really expected that this would fail on a
c7323a5ad07863 Vlastimil Babka 2022-08-23 2119 * freshly allocated slab, but a concurrent memory
c7323a5ad07863 Vlastimil Babka 2022-08-23 2120 * corruption in theory could cause that.
c7323a5ad07863 Vlastimil Babka 2022-08-23 2121 */
c7323a5ad07863 Vlastimil Babka 2022-08-23 2122 return NULL;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2123
c7323a5ad07863 Vlastimil Babka 2022-08-23 @2124 spin_lock_irqsave(&n->list_lock, flags);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2125
c7323a5ad07863 Vlastimil Babka 2022-08-23 2126 if (slab->inuse == slab->objects)
c7323a5ad07863 Vlastimil Babka 2022-08-23 2127 add_full(s, n, slab);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2128 else
c7323a5ad07863 Vlastimil Babka 2022-08-23 2129 add_partial(n, slab, DEACTIVATE_TO_HEAD);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2130
c7323a5ad07863 Vlastimil Babka 2022-08-23 2131 inc_slabs_node(s, nid, slab->objects);
c7323a5ad07863 Vlastimil Babka 2022-08-23 @2132 spin_unlock_irqrestore(&n->list_lock, flags);
c7323a5ad07863 Vlastimil Babka 2022-08-23 2133
c7323a5ad07863 Vlastimil Babka 2022-08-23 2134 return object;
c7323a5ad07863 Vlastimil Babka 2022-08-23 2135 }
c7323a5ad07863 Vlastimil Babka 2022-08-23 2136
--
0-DAY CI Kernel Test Service
https://01.org/lkp
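
The robot's errors share one root cause: kmem_cache_node is defined once
in mm/slab.h and shared by SLAB and SLUB, so making list_lock a
raw_spinlock_t unconditionally breaks SLUB builds, where mm/slub.c still
calls the spinlock_t API on it. Below is a minimal userspace sketch of
that failure class (hypothetical stand-in types, not kernel code);
building it with gcc -Werror=incompatible-pointer-types fails the same
way the report above does:

/*
 * Stand-ins for spinlock_t and raw_spinlock_t; two distinct struct
 * types are all that is needed to reproduce the diagnostic.
 */
struct spinlock_sketch     { int locked; };
struct raw_spinlock_sketch { int locked; };

/* Stands in for spin_lock_irqsave(), which expects a spinlock_t. */
static void lock_sketch(struct spinlock_sketch *lock) { lock->locked = 1; }

struct node_sketch {
	struct raw_spinlock_sketch list_lock;	/* type changed unconditionally */
};

int main(void)
{
	struct node_sketch n = { { 0 } };

	/*
	 * error: passing 'struct raw_spinlock_sketch *' where
	 * 'struct spinlock_sketch *' is expected -> hard error under -Werror,
	 * exactly like spin_lock_irqsave(&n->list_lock, flags) in mm/slub.c.
	 */
	lock_sketch(&n.list_lock);
	return 0;
}
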
On 10/21/22 21:18, Jiri Kosina wrote:
> From: Jiri Kosina <[email protected]>
>
> The list_lock can be taken in hardirq context when do_drain() is being
> called via IPI on all cores, and therefore lockdep complains about it,
> because it can't be preempted on PREEMPT_RT.
>
> That's not a real issue, as SLAB can't be built on PREEMPT_RT anyway, but
> we still want to get rid of the warning on non-PREEMPT_RT builds.
>
> Annotate it therefore as a raw lock in order to get rid of the lockdep
> warning below.
>
> =============================
> [ BUG: Invalid wait context ]
> 6.1.0-rc1-00134-ge35184f32151 #4 Not tainted
> -----------------------------
> swapper/3/0 is trying to lock:
> ffff8bc88086dc18 (&parent->list_lock){..-.}-{3:3}, at: do_drain+0x57/0xb0
> other info that might help us debug this:
> context-{2:2}
> no locks held by swapper/3/0.
> stack backtrace:
> CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.1.0-rc1-00134-ge35184f32151 #4
> Hardware name: LENOVO 20K5S22R00/20K5S22R00, BIOS R0IET38W (1.16 ) 05/31/2017
> Call Trace:
> <IRQ>
> dump_stack_lvl+0x6b/0x9d
> __lock_acquire+0x1519/0x1730
> ? build_sched_domains+0x4bd/0x1590
> ? __lock_acquire+0xad2/0x1730
> lock_acquire+0x294/0x340
> ? do_drain+0x57/0xb0
> ? sched_clock_tick+0x41/0x60
> _raw_spin_lock+0x2c/0x40
> ? do_drain+0x57/0xb0
> do_drain+0x57/0xb0
> __flush_smp_call_function_queue+0x138/0x220
> __sysvec_call_function+0x4f/0x210
> sysvec_call_function+0x4b/0x90
> </IRQ>
> <TASK>
> asm_sysvec_call_function+0x16/0x20
> RIP: 0010:mwait_idle+0x5e/0x80
> Code: 31 d2 65 48 8b 04 25 80 ed 01 00 48 89 d1 0f 01 c8 48 8b 00 a8 08 75 14 66 90 0f 00 2d 0b 78 46 00 31 c0 48 89 c1 fb 0f 01 c9 <eb> 06 fb 0f 1f 44 00 00 65 48 8b 04 25 80 ed 01 00 f0 80 60 02 df
> RSP: 0000:ffffa90940217ee0 EFLAGS: 00000246
> RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff9bb9f93a
> RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000001
> R10: ffffa90940217ea8 R11: 0000000000000000 R12: ffffffffffffffff
> R13: 0000000000000000 R14: ffff8bc88127c500 R15: 0000000000000000
> ? default_idle_call+0x1a/0xa0
> default_idle_call+0x4b/0xa0
> do_idle+0x1f1/0x2c0
> ? _raw_spin_unlock_irqrestore+0x56/0x70
> cpu_startup_entry+0x19/0x20
> start_secondary+0x122/0x150
> secondary_startup_64_no_verify+0xce/0xdb
> </TASK>
>
> Signed-off-by: Jiri Kosina <[email protected]>
Thanks, added to slab/for-6.2/locking
...
> diff --git a/mm/slab.h b/mm/slab.h
> index 0202a8c2f0d2..7a705e4228c8 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -750,7 +750,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
> * The slab lists for all objects.
> */
> struct kmem_cache_node {
> +#ifdef CONFIG_SLAB
> + raw_spinlock_t list_lock;
> +#else
> spinlock_t list_lock;
> +#endif
>
> #ifdef CONFIG_SLAB
> struct list_head slabs_partial; /* partial list first, better asm code */
Simplified a bit: since we already have CONFIG_SLAB/CONFIG_SLUB #ifdef
sections in the struct, I moved the list_lock declarations there instead
of adding a new #ifdef.
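
For reference, a sketch of what that simplified layout presumably looks
like (field lists abbreviated to the locks and their nearest neighbours;
the per-allocator sections already existed, so only SLAB's lock carries
the raw annotation while SLUB keeps a plain spinlock_t):

/*
 * Sketch only -- see mm/slab.h in slab/for-6.2/locking for the applied
 * version.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;	/* taken from hardirq via the drain IPI */
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;	/* no hardirq user in SLUB, raw lock not needed */
	unsigned long nr_partial;
	struct list_head partial;
#endif
};
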