We would like to use hlist_unhashed() from timer_pending(),
which runs without protection of a lock.
Note that other callers might also want to use this variant.
Instead of forcing a READ_ONCE() for all hlist_unhashed()
callers, add a new helper with an explicit _lockless suffix
in the name to better document what is going on.
Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
and hlist_add_before()/hlist_add_behind() to pair with
the READ_ONCE().
Signed-off-by: Eric Dumazet <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
include/linux/list.h | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/include/linux/list.h b/include/linux/list.h
index 85c92555e31f85f019354e54d6efb8e79c2aee17..61f5aaf96192cdc4c1644741a415590b63c3c201 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -749,6 +749,16 @@ static inline int hlist_unhashed(const struct hlist_node *h)
return !h->pprev;
}
+/* This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing.
+ * The READ_ONCE() is paired with the various WRITE_ONCE() in hlist
+ * helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
@@ -761,7 +771,7 @@ static inline void __hlist_del(struct hlist_node *n)
WRITE_ONCE(*pprev, next);
if (next)
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_del(struct hlist_node *n)
@@ -782,32 +792,32 @@ static inline void hlist_del_init(struct hlist_node *n)
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
+ WRITE_ONCE(n->pprev, next->pprev);
+ WRITE_ONCE(n->next, next);
+ WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
- n->next = prev->next;
- prev->next = n;
- n->pprev = &prev->next;
+ WRITE_ONCE(n->next, prev->next);
+ WRITE_ONCE(prev->next, n);
+ WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
/* after that we'll appear to be on some hlist and hlist_del will work */
--
2.24.0.432.g9d3f5f5b63-goog
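To make the pairing in the patch above concrete, here is a minimal sketch (not part of the patch; struct foo, foo_list and foo_lock are invented for illustration) of a writer that hashes/unhashes a node under a spinlock while a reader checks hashed state without taking that lock:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
        struct hlist_node node;
};

static HLIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Writer side: serialized by foo_lock; with the patch, the stores to
 * ->next/->pprev inside hlist_add_head() use WRITE_ONCE(). */
static void foo_hash(struct foo *f)
{
        spin_lock(&foo_lock);
        hlist_add_head(&f->node, &foo_list);
        spin_unlock(&foo_lock);
}

/* Unlinks via __hlist_del(), whose stores likewise become WRITE_ONCE(). */
static void foo_unhash(struct foo *f)
{
        spin_lock(&foo_lock);
        hlist_del_init(&f->node);
        spin_unlock(&foo_lock);
}

/* Reader side: no lock held, so the READ_ONCE() variant is used to
 * avoid a torn load of ->pprev. */
static bool foo_is_hashed(const struct foo *f)
{
        return !hlist_unhashed_lockless(&f->node);
}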
timer_pending() is mostly used in lockless contexts.
Without proper annotations, KCSAN might detect a data-race [1]
Using hlist_unhashed_lockless() instead of hand-coding it
seems appropriate (as suggested by Paul E. McKenney).
[1]
BUG: KCSAN: data-race in del_timer / detach_if_pending
write to 0xffff88808697d870 of 8 bytes by task 10 on cpu 0:
__hlist_del include/linux/list.h:764 [inline]
detach_timer kernel/time/timer.c:815 [inline]
detach_if_pending+0xcd/0x2d0 kernel/time/timer.c:832
try_to_del_timer_sync+0x60/0xb0 kernel/time/timer.c:1226
del_timer_sync+0x6b/0xa0 kernel/time/timer.c:1365
schedule_timeout+0x2d2/0x6e0 kernel/time/timer.c:1896
rcu_gp_fqs_loop+0x37c/0x580 kernel/rcu/tree.c:1639
rcu_gp_kthread+0x143/0x230 kernel/rcu/tree.c:1799
kthread+0x1d4/0x200 drivers/block/aoe/aoecmd.c:1253
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
read to 0xffff88808697d870 of 8 bytes by task 12060 on cpu 1:
del_timer+0x3b/0xb0 kernel/time/timer.c:1198
sk_stop_timer+0x25/0x60 net/core/sock.c:2845
inet_csk_clear_xmit_timers+0x69/0xa0 net/ipv4/inet_connection_sock.c:523
tcp_clear_xmit_timers include/net/tcp.h:606 [inline]
tcp_v4_destroy_sock+0xa3/0x3f0 net/ipv4/tcp_ipv4.c:2096
inet_csk_destroy_sock+0xf4/0x250 net/ipv4/inet_connection_sock.c:836
tcp_close+0x6f3/0x970 net/ipv4/tcp.c:2497
inet_release+0x86/0x100 net/ipv4/af_inet.c:427
__sock_release+0x85/0x160 net/socket.c:590
sock_close+0x24/0x30 net/socket.c:1268
__fput+0x1e1/0x520 fs/file_table.c:280
____fput+0x1f/0x30 fs/file_table.c:313
task_work_run+0xf6/0x130 kernel/task_work.c:113
tracehook_notify_resume include/linux/tracehook.h:188 [inline]
exit_to_usermode_loop+0x2b4/0x2c0 arch/x86/entry/common.c:163
Reported by Kernel Concurrency Sanitizer on:
CPU: 1 PID: 12060 Comm: syz-executor.5 Not tainted 5.4.0-rc3+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine,
Signed-off-by: Eric Dumazet <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
include/linux/timer.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 1e6650ed066d5d28251b0bd385fc37ef94c96532..0dc19a8c39c9e49a7cde3d34bfa4be8871cbc1c2 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -164,7 +164,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.pprev != NULL;
+ return !hlist_unhashed_lockless(&timer->entry);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
--
2.24.0.432.g9d3f5f5b63-goog
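For context, timer_pending() is routinely called with no lock held, for instance in an arm-only-if-idle pattern like the sketch below (my_timer, my_timer_fn and the delay are placeholders, not taken from the patch). The check stays racy by design; the point of the conversion is only that the load of ->entry.pprev can no longer be torn or otherwise mangled by the compiler:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_timer_fn(struct timer_list *t)
{
        /* handler body omitted */
}

static DEFINE_TIMER(my_timer, my_timer_fn);

static void maybe_arm(unsigned long delay)
{
        /* Lockless read of ->entry.pprev via hlist_unhashed_lockless(). */
        if (!timer_pending(&my_timer))
                mod_timer(&my_timer, jiffies + delay);
}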
On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> We would like to use hlist_unhashed() from timer_pending(),
> which runs without protection of a lock.
>
> Note that other callers might also want to use this variant.
>
> Instead of forcing a READ_ONCE() for all hlist_unhashed()
> callers, add a new helper with an explicit _lockless suffix
> in the name to better document what is going on.
>
> Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> and hlist_add_before()/hlist_add_behind() to pair with
> the READ_ONCE().
>
> Signed-off-by: Eric Dumazet <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
I have queued this, but if you prefer it go some other way:
Acked-by: Paul E. McKenney <[email protected]>
But shouldn't the uses in include/linux/rculist.h also be converted
into the patch below? If so, I will squash the following into your
patch.
Thanx, Paul
------------------------------------------------------------------------
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 61c6728a..4b7ae1b 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -173,7 +173,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
@@ -473,7 +473,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -489,11 +489,11 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *next = old->next;
new->next = next;
- new->pprev = old->pprev;
+ WRITE_ONCE(new->pprev, old->pprev);
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
- new->next->pprev = &new->next;
- old->pprev = LIST_POISON2;
+ WRITE_ONCE(new->next->pprev, &new->next);
+ WRITE_ONCE(old->pprev, LIST_POISON2);
}
/*
@@ -528,10 +528,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -564,7 +564,7 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
if (last) {
n->next = last->next;
- n->pprev = &last->next;
+ WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_add_head_rcu(n, h);
@@ -592,10 +592,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
- n->pprev = next->pprev;
+ WRITE_ONCE(n->pprev, next->pprev);
n->next = next;
rcu_assign_pointer(hlist_pprev_rcu(n), n);
- next->pprev = &n->next;
+ WRITE_ONCE(next->pprev, &n->next);
}
/**
@@ -620,10 +620,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
- n->pprev = &prev->next;
+ WRITE_ONCE(n->pprev, &prev->next);
rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
#define __hlist_for_each_rcu(pos, head) \
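As a rough usage sketch for the rculist.h annotations above (struct item and item_lock are hypothetical): removal under a lock with hlist_del_init_rcu() leaves the node unhashed, and another context may test that state locklessly, which is what the WRITE_ONCE(n->pprev, NULL) pairs with:

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
        struct hlist_node node;
};

static DEFINE_SPINLOCK(item_lock);

static void item_remove(struct item *it)
{
        spin_lock(&item_lock);
        /* The ->pprev = NULL store becomes WRITE_ONCE() with the squash above. */
        hlist_del_init_rcu(&it->node);
        spin_unlock(&item_lock);
}

/* May run concurrently with item_remove() without taking item_lock. */
static bool item_gone(const struct item *it)
{
        return hlist_unhashed_lockless(&it->node);
}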
On Thu, Nov 07, 2019 at 11:37:38AM -0800, Eric Dumazet wrote:
> timer_pending() is mostly used in lockless contexts.
>
> Without proper annotations, KCSAN might detect a data-race [1]
>
> Using hlist_unhashed_lockless() instead of hand-coding it
> seems appropriate (as suggested by Paul E. McKenney).
>
> [1]
>
> BUG: KCSAN: data-race in del_timer / detach_if_pending
>
> write to 0xffff88808697d870 of 8 bytes by task 10 on cpu 0:
> __hlist_del include/linux/list.h:764 [inline]
> detach_timer kernel/time/timer.c:815 [inline]
> detach_if_pending+0xcd/0x2d0 kernel/time/timer.c:832
> try_to_del_timer_sync+0x60/0xb0 kernel/time/timer.c:1226
> del_timer_sync+0x6b/0xa0 kernel/time/timer.c:1365
> schedule_timeout+0x2d2/0x6e0 kernel/time/timer.c:1896
> rcu_gp_fqs_loop+0x37c/0x580 kernel/rcu/tree.c:1639
> rcu_gp_kthread+0x143/0x230 kernel/rcu/tree.c:1799
> kthread+0x1d4/0x200 drivers/block/aoe/aoecmd.c:1253
> ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
>
> read to 0xffff88808697d870 of 8 bytes by task 12060 on cpu 1:
> del_timer+0x3b/0xb0 kernel/time/timer.c:1198
> sk_stop_timer+0x25/0x60 net/core/sock.c:2845
> inet_csk_clear_xmit_timers+0x69/0xa0 net/ipv4/inet_connection_sock.c:523
> tcp_clear_xmit_timers include/net/tcp.h:606 [inline]
> tcp_v4_destroy_sock+0xa3/0x3f0 net/ipv4/tcp_ipv4.c:2096
> inet_csk_destroy_sock+0xf4/0x250 net/ipv4/inet_connection_sock.c:836
> tcp_close+0x6f3/0x970 net/ipv4/tcp.c:2497
> inet_release+0x86/0x100 net/ipv4/af_inet.c:427
> __sock_release+0x85/0x160 net/socket.c:590
> sock_close+0x24/0x30 net/socket.c:1268
> __fput+0x1e1/0x520 fs/file_table.c:280
> ____fput+0x1f/0x30 fs/file_table.c:313
> task_work_run+0xf6/0x130 kernel/task_work.c:113
> tracehook_notify_resume include/linux/tracehook.h:188 [inline]
> exit_to_usermode_loop+0x2b4/0x2c0 arch/x86/entry/common.c:163
>
> Reported by Kernel Concurrency Sanitizer on:
> CPU: 1 PID: 12060 Comm: syz-executor.5 Not tainted 5.4.0-rc3+ #0
> Hardware name: Google Google Compute Engine/Google Compute Engine,
>
> Signed-off-by: Eric Dumazet <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
And I queued this one as well, but again if you would prefer it go
up elsewhere, for whatever it is worth:
Acked-by: Paul E. McKenney <[email protected]>
> ---
> include/linux/timer.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/include/linux/timer.h b/include/linux/timer.h
> index 1e6650ed066d5d28251b0bd385fc37ef94c96532..0dc19a8c39c9e49a7cde3d34bfa4be8871cbc1c2 100644
> --- a/include/linux/timer.h
> +++ b/include/linux/timer.h
> @@ -164,7 +164,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
> */
> static inline int timer_pending(const struct timer_list * timer)
> {
> - return timer->entry.pprev != NULL;
> + return !hlist_unhashed_lockless(&timer->entry);
> }
>
> extern void add_timer_on(struct timer_list *timer, int cpu);
> --
> 2.24.0.432.g9d3f5f5b63-goog
>
On Fri, Nov 8, 2019 at 11:24 AM Paul E. McKenney <[email protected]> wrote:
>
> On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> > We would like to use hlist_unhashed() from timer_pending(),
> > which runs without protection of a lock.
> >
> > Note that other callers might also want to use this variant.
> >
> > Instead of forcing a READ_ONCE() for all hlist_unhashed()
> > callers, add a new helper with an explicit _lockless suffix
> > in the name to better document what is going on.
> >
> > Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> > and hlist_add_before()/hlist_add_behind() to pair with
> > the READ_ONCE().
> >
> > Signed-off-by: Eric Dumazet <[email protected]>
> > Cc: "Paul E. McKenney" <[email protected]>
> > Cc: Thomas Gleixner <[email protected]>
>
> I have queued this, but if you prefer it go some other way:
>
> Acked-by: Paul E. McKenney <[email protected]>
>
> But shouldn't the uses in include/linux/rculist.h also be converted
> into the patch below? If so, I will squash the following into your
> patch.
>
> Thanx, Paul
>
> ------------------------------------------------------------------------
Agreed, thanks for the addition of this Paul.
On Fri, Nov 08, 2019 at 12:17:49PM -0800, Eric Dumazet wrote:
> On Fri, Nov 8, 2019 at 11:24 AM Paul E. McKenney <[email protected]> wrote:
> >
> > On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> > > We would like to use hlist_unhashed() from timer_pending(),
> > > which runs without protection of a lock.
> > >
> > > Note that other callers might also want to use this variant.
> > >
> > > Instead of forcing a READ_ONCE() for all hlist_unhashed()
> > > callers, add a new helper with an explicit _lockless suffix
> > > in the name to better document what is going on.
> > >
> > > Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> > > and hlist_add_before()/hlist_add_behind() to pair with
> > > the READ_ONCE().
> > >
> > > Signed-off-by: Eric Dumazet <[email protected]>
> > > Cc: "Paul E. McKenney" <[email protected]>
> > > Cc: Thomas Gleixner <[email protected]>
> >
> > I have queued this, but if you prefer it go some other way:
> >
> > Acked-by: Paul E. McKenney <[email protected]>
> >
> > But shouldn't the uses in include/linux/rculist.h also be converted
> > into the patch below? If so, I will squash the following into your
> > patch.
> >
> > Thanx, Paul
> >
> > ------------------------------------------------------------------------
>
> Agreed, thanks for the addition of this Paul.
Very good, squashed and pushed, thank you!
Thanx, Paul
On Fri, Nov 8, 2019 at 3:42 PM Paul E. McKenney <[email protected]> wrote:
>
> On Fri, Nov 08, 2019 at 12:17:49PM -0800, Eric Dumazet wrote:
> > On Fri, Nov 8, 2019 at 11:24 AM Paul E. McKenney <[email protected]> wrote:
> > >
> > > On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> > > > We would like to use hlist_unhashed() from timer_pending(),
> > > > which runs without protection of a lock.
> > > >
> > > > Note that other callers might also want to use this variant.
> > > >
> > > > Instead of forcing a READ_ONCE() for all hlist_unhashed()
> > > > callers, add a new helper with an explicit _lockless suffix
> > > > in the name to better document what is going on.
> > > >
> > > > Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> > > > and hlist_add_before()/hlist_add_behind() to pair with
> > > > the READ_ONCE().
> > > >
> > > > Signed-off-by: Eric Dumazet <[email protected]>
> > > > Cc: "Paul E. McKenney" <[email protected]>
> > > > Cc: Thomas Gleixner <[email protected]>
> > >
> > > I have queued this, but if you prefer it go some other way:
> > >
> > > Acked-by: Paul E. McKenney <[email protected]>
> > >
> > > But shouldn't the uses in include/linux/rculist.h also be converted
> > > into the patch below? If so, I will squash the following into your
> > > patch.
> > >
> > > Thanx, Paul
> > >
> > > ------------------------------------------------------------------------
> >
> > Agreed, thanks for the addition of this Paul.
>
> Very good, squashed and pushed, thank you!
>
I have another KCSAN report of a bug that will force us to use
hlist_unhashed_lockless() from sk_unhashed()
(Meaning we also need to add some WRITE_ONCE() annotations to
include/linux/list_nulls.h )
BUG: KCSAN: data-race in inet_unhash / inet_unhash
write to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 1:
__hlist_nulls_del include/linux/list_nulls.h:88 [inline]
hlist_nulls_del_init_rcu include/linux/rculist_nulls.h:36 [inline]
__sk_nulls_del_node_init_rcu include/net/sock.h:676 [inline]
inet_unhash+0x38f/0x4a0 net/ipv4/inet_hashtables.c:612
tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
expire_timers kernel/time/timer.c:1449 [inline]
__run_timers kernel/time/timer.c:1773 [inline]
__run_timers kernel/time/timer.c:1740 [inline]
run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
__do_softirq+0x115/0x33f kernel/softirq.c:292
invoke_softirq kernel/softirq.c:373 [inline]
irq_exit+0xbb/0xe0 kernel/softirq.c:413
exiting_irq arch/x86/include/asm/apic.h:536 [inline]
smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
cpuidle_idle_call kernel/sched/idle.c:154 [inline]
do_idle+0x1af/0x280 kernel/sched/idle.c:263
cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
read to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 0:
sk_unhashed include/net/sock.h:607 [inline]
inet_unhash+0x3d/0x4a0 net/ipv4/inet_hashtables.c:592
tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
expire_timers kernel/time/timer.c:1449 [inline]
__run_timers kernel/time/timer.c:1773 [inline]
__run_timers kernel/time/timer.c:1740 [inline]
run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
__do_softirq+0x115/0x33f kernel/softirq.c:292
invoke_softirq kernel/softirq.c:373 [inline]
irq_exit+0xbb/0xe0 kernel/softirq.c:413
exiting_irq arch/x86/include/asm/apic.h:536 [inline]
smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
cpuidle_idle_call kernel/sched/idle.c:154 [inline]
do_idle+0x1af/0x280 kernel/sched/idle.c:263
cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
rest_init+0xec/0xf6 init/main.c:452
arch_call_rest_init+0x17/0x37
start_kernel+0x838/0x85e init/main.c:786
x86_64_start_reservations+0x29/0x2b arch/x86/kernel/head64.c:490
x86_64_start_kernel+0x72/0x76 arch/x86/kernel/head64.c:471
secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.0-rc6+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine,
BIOS Google 01/01/2011
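The conversion Eric alludes to would presumably look something like the hypothetical helper below; this is only a sketch, and an actual patch would instead change sk_unhashed() in include/net/sock.h itself:

#include <net/sock.h>

/* Hypothetical lockless variant of sk_unhashed(), for illustration only. */
static inline bool sk_unhashed_lockless(const struct sock *sk)
{
        return hlist_unhashed_lockless(&sk->sk_node);
}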
On Fri, Nov 08, 2019 at 07:15:16PM -0800, Eric Dumazet wrote:
> On Fri, Nov 8, 2019 at 3:42 PM Paul E. McKenney <[email protected]> wrote:
> >
> > On Fri, Nov 08, 2019 at 12:17:49PM -0800, Eric Dumazet wrote:
> > > On Fri, Nov 8, 2019 at 11:24 AM Paul E. McKenney <[email protected]> wrote:
> > > >
> > > > On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> > > > > We would like to use hlist_unhashed() from timer_pending(),
> > > > > which runs without protection of a lock.
> > > > >
> > > > > Note that other callers might also want to use this variant.
> > > > >
> > > > > Instead of forcing a READ_ONCE() for all hlist_unhashed()
> > > > > callers, add a new helper with an explicit _lockless suffix
> > > > > in the name to better document what is going on.
> > > > >
> > > > > Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> > > > > and hlist_add_before()/hlist_add_behind() to pair with
> > > > > the READ_ONCE().
> > > > >
> > > > > Signed-off-by: Eric Dumazet <[email protected]>
> > > > > Cc: "Paul E. McKenney" <[email protected]>
> > > > > Cc: Thomas Gleixner <[email protected]>
> > > >
> > > > I have queued this, but if you prefer it go some other way:
> > > >
> > > > Acked-by: Paul E. McKenney <[email protected]>
> > > >
> > > > But shouldn't the uses in include/linux/rculist.h also be converted
> > > > into the patch below? If so, I will squash the following into your
> > > > patch.
> > > >
> > > > Thanx, Paul
> > > >
> > > > ------------------------------------------------------------------------
> > >
> > > Agreed, thanks for the addition of this Paul.
> >
> > Very good, squashed and pushed, thank you!
> >
>
> I have another KCSAN report of a bug that will force us to use
> hlist_unhashed_lockless() from sk_unhashed()
>
> (Meaning we also need to add some WRITE_ONCE() annotations to
> include/linux/list_nulls.h )
>
> BUG: KCSAN: data-race in inet_unhash / inet_unhash
>
> write to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 1:
> __hlist_nulls_del include/linux/list_nulls.h:88 [inline]
> hlist_nulls_del_init_rcu include/linux/rculist_nulls.h:36 [inline]
> __sk_nulls_del_node_init_rcu include/net/sock.h:676 [inline]
> inet_unhash+0x38f/0x4a0 net/ipv4/inet_hashtables.c:612
> tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
> tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
> tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
> tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
> tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
> tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
> call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
> expire_timers kernel/time/timer.c:1449 [inline]
> __run_timers kernel/time/timer.c:1773 [inline]
> __run_timers kernel/time/timer.c:1740 [inline]
> run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
> __do_softirq+0x115/0x33f kernel/softirq.c:292
> invoke_softirq kernel/softirq.c:373 [inline]
> irq_exit+0xbb/0xe0 kernel/softirq.c:413
> exiting_irq arch/x86/include/asm/apic.h:536 [inline]
> smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
> apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
> native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
> arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
> default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
> cpuidle_idle_call kernel/sched/idle.c:154 [inline]
> do_idle+0x1af/0x280 kernel/sched/idle.c:263
> cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
> start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
> secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
>
> read to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 0:
> sk_unhashed include/net/sock.h:607 [inline]
> inet_unhash+0x3d/0x4a0 net/ipv4/inet_hashtables.c:592
> tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
> tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
> tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
> tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
> tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
> tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
> call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
> expire_timers kernel/time/timer.c:1449 [inline]
> __run_timers kernel/time/timer.c:1773 [inline]
> __run_timers kernel/time/timer.c:1740 [inline]
> run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
> __do_softirq+0x115/0x33f kernel/softirq.c:292
> invoke_softirq kernel/softirq.c:373 [inline]
> irq_exit+0xbb/0xe0 kernel/softirq.c:413
> exiting_irq arch/x86/include/asm/apic.h:536 [inline]
> smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
> apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
> native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
> arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
> default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
> cpuidle_idle_call kernel/sched/idle.c:154 [inline]
> do_idle+0x1af/0x280 kernel/sched/idle.c:263
> cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
> rest_init+0xec/0xf6 init/main.c:452
> arch_call_rest_init+0x17/0x37
> start_kernel+0x838/0x85e init/main.c:786
> x86_64_start_reservations+0x29/0x2b arch/x86/kernel/head64.c:490
> x86_64_start_kernel+0x72/0x76 arch/x86/kernel/head64.c:471
> secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
>
> Reported by Kernel Concurrency Sanitizer on:
> CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.0-rc6+ #0
> Hardware name: Google Google Compute Engine/Google Compute Engine,
> BIOS Google 01/01/2011
Like this?
Thanx, Paul
------------------------------------------------------------------------
commit fef2da9c0cfa4f9ec405ff059fceb00d29de34dc
Author: Paul E. McKenney <[email protected]>
Date: Sat Nov 9 09:42:13 2019 -0800
rcu: Use WRITE_ONCE() for assignments to ->pprev for hlist_nulls
Eric Dumazet supplied a KCSAN report of a bug that forces use
of hlist_unhashed_lockless() from sk_unhashed():
------------------------------------------------------------------------
BUG: KCSAN: data-race in inet_unhash / inet_unhash
write to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 1:
__hlist_nulls_del include/linux/list_nulls.h:88 [inline]
hlist_nulls_del_init_rcu include/linux/rculist_nulls.h:36 [inline]
__sk_nulls_del_node_init_rcu include/net/sock.h:676 [inline]
inet_unhash+0x38f/0x4a0 net/ipv4/inet_hashtables.c:612
tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
expire_timers kernel/time/timer.c:1449 [inline]
__run_timers kernel/time/timer.c:1773 [inline]
__run_timers kernel/time/timer.c:1740 [inline]
run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
__do_softirq+0x115/0x33f kernel/softirq.c:292
invoke_softirq kernel/softirq.c:373 [inline]
irq_exit+0xbb/0xe0 kernel/softirq.c:413
exiting_irq arch/x86/include/asm/apic.h:536 [inline]
smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
cpuidle_idle_call kernel/sched/idle.c:154 [inline]
do_idle+0x1af/0x280 kernel/sched/idle.c:263
cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
read to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 0:
sk_unhashed include/net/sock.h:607 [inline]
inet_unhash+0x3d/0x4a0 net/ipv4/inet_hashtables.c:592
tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
expire_timers kernel/time/timer.c:1449 [inline]
__run_timers kernel/time/timer.c:1773 [inline]
__run_timers kernel/time/timer.c:1740 [inline]
run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
__do_softirq+0x115/0x33f kernel/softirq.c:292
invoke_softirq kernel/softirq.c:373 [inline]
irq_exit+0xbb/0xe0 kernel/softirq.c:413
exiting_irq arch/x86/include/asm/apic.h:536 [inline]
smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
cpuidle_idle_call kernel/sched/idle.c:154 [inline]
do_idle+0x1af/0x280 kernel/sched/idle.c:263
cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
rest_init+0xec/0xf6 init/main.c:452
arch_call_rest_init+0x17/0x37
start_kernel+0x838/0x85e init/main.c:786
x86_64_start_reservations+0x29/0x2b arch/x86/kernel/head64.c:490
x86_64_start_kernel+0x72/0x76 arch/x86/kernel/head64.c:471
secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.0-rc6+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine,
BIOS Google 01/01/2011
------------------------------------------------------------------------
This commit therefore replaces C-language assignments with WRITE_ONCE()
in include/linux/list_nulls.h and include/linux/rculist_nulls.h.
Reported-by: Eric Dumazet <[email protected]> # For KCSAN
Signed-off-by: Paul E. McKenney <[email protected]>
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 3ef9674..1ecd356 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -72,10 +72,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -85,13 +85,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index bc8206a..517a06f 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -34,7 +34,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
@@ -66,7 +66,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -94,10 +94,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
/**
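To show what these list_nulls/rculist_nulls annotations pair with, here is a sketch with invented names (struct obj, obj_head, obj_lock): a writer that (un)hashes under a lock, and a lockless hashed-state check that open-codes the READ_ONCE(). The open-coded read is exactly what the helper proposed further down wraps up:

#include <linux/list_nulls.h>
#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
        struct hlist_nulls_node node;
};

static struct hlist_nulls_head obj_head;
static DEFINE_SPINLOCK(obj_lock);

static void obj_table_init(void)
{
        INIT_HLIST_NULLS_HEAD(&obj_head, 0);    /* 0 is an arbitrary nulls value */
}

static void obj_hash(struct obj *o)
{
        spin_lock(&obj_lock);
        /* ->pprev stores become WRITE_ONCE() with the patch above. */
        hlist_nulls_add_head_rcu(&o->node, &obj_head);
        spin_unlock(&obj_lock);
}

static void obj_unhash(struct obj *o)
{
        spin_lock(&obj_lock);
        /* Ends with WRITE_ONCE(o->node.pprev, NULL) after the patch. */
        hlist_nulls_del_init_rcu(&o->node);
        spin_unlock(&obj_lock);
}

/* Lockless check, pairing with the WRITE_ONCE() stores above. */
static bool obj_is_unhashed(const struct obj *o)
{
        return !READ_ONCE(o->node.pprev);
}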
On Sat, Nov 09, 2019 at 09:54:40AM -0800, Paul E. McKenney wrote:
> On Fri, Nov 08, 2019 at 07:15:16PM -0800, Eric Dumazet wrote:
> > On Fri, Nov 8, 2019 at 3:42 PM Paul E. McKenney <[email protected]> wrote:
> > >
> > > On Fri, Nov 08, 2019 at 12:17:49PM -0800, Eric Dumazet wrote:
> > > > On Fri, Nov 8, 2019 at 11:24 AM Paul E. McKenney <[email protected]> wrote:
> > > > >
> > > > > On Thu, Nov 07, 2019 at 11:37:37AM -0800, Eric Dumazet wrote:
> > > > > > We would like to use hlist_unhashed() from timer_pending(),
> > > > > > which runs without protection of a lock.
> > > > > >
> > > > > > Note that other callers might also want to use this variant.
> > > > > >
> > > > > > Instead of forcing a READ_ONCE() for all hlist_unhashed()
> > > > > > callers, add a new helper with an explicit _lockless suffix
> > > > > > in the name to better document what is going on.
> > > > > >
> > > > > > Also add various WRITE_ONCE() in __hlist_del(), hlist_add_head()
> > > > > > and hlist_add_before()/hlist_add_behind() to pair with
> > > > > > the READ_ONCE().
> > > > > >
> > > > > > Signed-off-by: Eric Dumazet <[email protected]>
> > > > > > Cc: "Paul E. McKenney" <[email protected]>
> > > > > > Cc: Thomas Gleixner <[email protected]>
> > > > >
> > > > > I have queued this, but if you prefer it go some other way:
> > > > >
> > > > > Acked-by: Paul E. McKenney <[email protected]>
> > > > >
> > > > > But shouldn't the uses in include/linux/rculist.h also be converted
> > > > > into the patch below? If so, I will squash the following into your
> > > > > patch.
> > > > >
> > > > > Thanx, Paul
> > > > >
> > > > > ------------------------------------------------------------------------
> > > >
> > > > Agreed, thanks for the addition of this Paul.
> > >
> > > Very good, squashed and pushed, thank you!
> > >
> >
> > I have another KCSAN report of a bug that will force us to use
> > hlist_unhashed_lockless() from sk_unhashed()
> >
> > (Meaning we also need to add some WRITE_ONCE() annotations to
> > include/linux/list_nulls.h )
> >
> > BUG: KCSAN: data-race in inet_unhash / inet_unhash
> >
> > write to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 1:
> > __hlist_nulls_del include/linux/list_nulls.h:88 [inline]
> > hlist_nulls_del_init_rcu include/linux/rculist_nulls.h:36 [inline]
> > __sk_nulls_del_node_init_rcu include/net/sock.h:676 [inline]
> > inet_unhash+0x38f/0x4a0 net/ipv4/inet_hashtables.c:612
> > tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
> > tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
> > tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
> > tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
> > tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
> > tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
> > call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
> > expire_timers kernel/time/timer.c:1449 [inline]
> > __run_timers kernel/time/timer.c:1773 [inline]
> > __run_timers kernel/time/timer.c:1740 [inline]
> > run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
> > __do_softirq+0x115/0x33f kernel/softirq.c:292
> > invoke_softirq kernel/softirq.c:373 [inline]
> > irq_exit+0xbb/0xe0 kernel/softirq.c:413
> > exiting_irq arch/x86/include/asm/apic.h:536 [inline]
> > smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
> > apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
> > native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
> > arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
> > default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
> > cpuidle_idle_call kernel/sched/idle.c:154 [inline]
> > do_idle+0x1af/0x280 kernel/sched/idle.c:263
> > cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
> > start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
> > secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
> >
> > read to 0xffff8880a69a0170 of 8 bytes by interrupt on cpu 0:
> > sk_unhashed include/net/sock.h:607 [inline]
> > inet_unhash+0x3d/0x4a0 net/ipv4/inet_hashtables.c:592
> > tcp_set_state+0xfa/0x3e0 net/ipv4/tcp.c:2249
> > tcp_done+0x93/0x1e0 net/ipv4/tcp.c:3854
> > tcp_write_err+0x7e/0xc0 net/ipv4/tcp_timer.c:56
> > tcp_retransmit_timer+0x9b8/0x16d0 net/ipv4/tcp_timer.c:479
> > tcp_write_timer_handler+0x42d/0x510 net/ipv4/tcp_timer.c:599
> > tcp_write_timer+0xd1/0xf0 net/ipv4/tcp_timer.c:619
> > call_timer_fn+0x5f/0x2f0 kernel/time/timer.c:1404
> > expire_timers kernel/time/timer.c:1449 [inline]
> > __run_timers kernel/time/timer.c:1773 [inline]
> > __run_timers kernel/time/timer.c:1740 [inline]
> > run_timer_softirq+0xc0c/0xcd0 kernel/time/timer.c:1786
> > __do_softirq+0x115/0x33f kernel/softirq.c:292
> > invoke_softirq kernel/softirq.c:373 [inline]
> > irq_exit+0xbb/0xe0 kernel/softirq.c:413
> > exiting_irq arch/x86/include/asm/apic.h:536 [inline]
> > smp_apic_timer_interrupt+0xe6/0x280 arch/x86/kernel/apic/apic.c:1137
> > apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:830
> > native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
> > arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
> > default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
> > cpuidle_idle_call kernel/sched/idle.c:154 [inline]
> > do_idle+0x1af/0x280 kernel/sched/idle.c:263
> > cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
> > rest_init+0xec/0xf6 init/main.c:452
> > arch_call_rest_init+0x17/0x37
> > start_kernel+0x838/0x85e init/main.c:786
> > x86_64_start_reservations+0x29/0x2b arch/x86/kernel/head64.c:490
> > x86_64_start_kernel+0x72/0x76 arch/x86/kernel/head64.c:471
> > secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
> >
> > Reported by Kernel Concurrency Sanitizer on:
> > CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.0-rc6+ #0
> > Hardware name: Google Google Compute Engine/Google Compute Engine,
> > BIOS Google 01/01/2011
>
> Like this?
Hmmm... Do you also need this?
Thanx, Paul
------------------------------------------------------------------------
commit cf78c8772c9dc26a36c0e5eae1262cc396bbfb3f
Author: Paul E. McKenney <[email protected]>
Date: Sat Nov 9 10:45:47 2019 -0800
rcu: Add a hlist_nulls_unhashed_lockless() function
This commit adds an hlist_nulls_unhashed_lockless() function to allow
lockless checking of whether or not an hlist_nulls_node is hashed.
While in the area, this commit also adds a docbook comment to the existing
hlist_nulls_unhashed() function.
Signed-off-by: Paul E. McKenney <[email protected]>
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 1ecd356..fa6e847 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -56,11 +56,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
return ((unsigned long)ptr) >> 1;
}
+/**
+ * hlist_nulls_unhashed - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.
+ */
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
+ * function may be used locklessly.
+ */
+static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
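With this new helper, the open-coded READ_ONCE() from the earlier list_nulls sketch could then be spelled as follows (struct nobj is again hypothetical):

#include <linux/list_nulls.h>
#include <linux/types.h>

struct nobj {
        struct hlist_nulls_node node;
};

/* Lockless hashed-state test via the new helper. */
static bool nobj_is_hashed(const struct nobj *o)
{
        return !hlist_nulls_unhashed_lockless(&o->node);
}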