2019-10-09 16:00:58

by Marco Elver

Subject: [PATCH] rcu: Fix data-race due to atomic_t copy-by-value

This fixes a data-race where `atomic_t dynticks` is copied by value. The
copy is performed non-atomically, resulting in a data-race if `dynticks`
is updated concurrently.
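
For illustration only, here is a minimal standalone sketch of the same
pattern (userspace C using the GCC/Clang __atomic builtins; the my_* and
trace_* names are made up and merely mimic atomic_t and the tracepoint).
Copying the wrapper struct by value reads ->counter with a plain, unmarked
load, whereas passing the int returned by the read accessor keeps the
access inside the atomic API:

#include <pthread.h>
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* mimics atomic_t */

static int my_atomic_read(const my_atomic_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static void my_atomic_inc(my_atomic_t *v)
{
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

static my_atomic_t dynticks;

/* Racy: the by-value copy reads ->counter with a plain, unmarked load. */
static void trace_by_value(my_atomic_t snap)
{
	printf("snapshot = %d\n", snap.counter);
}

/* Race-free: the caller snapshots the counter through the atomic API. */
static void trace_by_int(int snap)
{
	printf("snapshot = %d\n", snap);
}

static void *updater(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		my_atomic_inc(&dynticks);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, updater, NULL);
	trace_by_value(dynticks);			/* plain read, races with updater */
	trace_by_int(my_atomic_read(&dynticks));	/* marked read, no data race */
	pthread_join(t, NULL);
	return 0;
}

The change below switches the rcu_dyntick tracepoint to the second form.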

This data-race was found with KCSAN:
==================================================================
BUG: KCSAN: data-race in dyntick_save_progress_counter / rcu_irq_enter

write to 0xffff989dbdbe98e0 of 4 bytes by task 10 on cpu 3:
atomic_add_return include/asm-generic/atomic-instrumented.h:78 [inline]
rcu_dynticks_snap kernel/rcu/tree.c:310 [inline]
dyntick_save_progress_counter+0x43/0x1b0 kernel/rcu/tree.c:984
force_qs_rnp+0x183/0x200 kernel/rcu/tree.c:2286
rcu_gp_fqs kernel/rcu/tree.c:1601 [inline]
rcu_gp_fqs_loop+0x71/0x880 kernel/rcu/tree.c:1653
rcu_gp_kthread+0x22c/0x3b0 kernel/rcu/tree.c:1799
kthread+0x1b5/0x200 kernel/kthread.c:255
<snip>

read to 0xffff989dbdbe98e0 of 4 bytes by task 154 on cpu 7:
rcu_nmi_enter_common kernel/rcu/tree.c:828 [inline]
rcu_irq_enter+0xda/0x240 kernel/rcu/tree.c:870
irq_enter+0x5/0x50 kernel/softirq.c:347
<snip>

Reported by Kernel Concurrency Sanitizer on:
CPU: 7 PID: 154 Comm: kworker/7:1H Not tainted 5.3.0+ #5
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
Workqueue: kblockd blk_mq_run_work_fn
==================================================================

Signed-off-by: Marco Elver <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Josh Triplett <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Lai Jiangshan <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
include/trace/events/rcu.h | 4 ++--
kernel/rcu/tree.c | 11 ++++++-----
2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 694bd040cf51..fdd31c5fd126 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -442,7 +442,7 @@ TRACE_EVENT_RCU(rcu_fqs,
*/
TRACE_EVENT_RCU(rcu_dyntick,

- TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),

TP_ARGS(polarity, oldnesting, newnesting, dynticks),

@@ -457,7 +457,7 @@ TRACE_EVENT_RCU(rcu_dyntick,
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = atomic_read(&dynticks);
+ __entry->dynticks = dynticks;
),

TP_printk("%s %lx %lx %#3x", __entry->polarity,
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 81105141b6a8..62e59596a30a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -576,7 +576,7 @@ static void rcu_eqs_enter(bool user)
}

lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
+ trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rdp = this_cpu_ptr(&rcu_data);
do_nocb_deferred_wakeup(rdp);
@@ -649,14 +649,15 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
* leave it in non-RCU-idle state.
*/
if (rdp->dynticks_nmi_nesting != 1) {
- trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+ trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
+ atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
rdp->dynticks_nmi_nesting - 2);
return;
}

/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
+ trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

if (irq)
@@ -743,7 +744,7 @@ static void rcu_eqs_exit(bool user)
rcu_dynticks_task_exit();
rcu_dynticks_eqs_exit();
rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
+ trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(rdp->dynticks_nesting, 1);
WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
@@ -827,7 +828,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
}
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdp->dynticks_nmi_nesting,
- rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
+ rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
rdp->dynticks_nmi_nesting + incby);
barrier();
--
2.23.0.581.g78d2f28ef7-goog


2019-10-10 03:09:24

by Paul E. McKenney

Subject: Re: [PATCH] rcu: Fix data-race due to atomic_t copy-by-value

On Wed, Oct 09, 2019 at 05:57:43PM +0200, Marco Elver wrote:
> This fixes a data-race where `atomic_t dynticks` is copied by value. The
> copy is performed non-atomically, resulting in a data-race if `dynticks`
> is updated concurrently.
>
> This data-race was found with KCSAN:
> ==================================================================
> BUG: KCSAN: data-race in dyntick_save_progress_counter / rcu_irq_enter
>
> write to 0xffff989dbdbe98e0 of 4 bytes by task 10 on cpu 3:
> atomic_add_return include/asm-generic/atomic-instrumented.h:78 [inline]
> rcu_dynticks_snap kernel/rcu/tree.c:310 [inline]
> dyntick_save_progress_counter+0x43/0x1b0 kernel/rcu/tree.c:984
> force_qs_rnp+0x183/0x200 kernel/rcu/tree.c:2286
> rcu_gp_fqs kernel/rcu/tree.c:1601 [inline]
> rcu_gp_fqs_loop+0x71/0x880 kernel/rcu/tree.c:1653
> rcu_gp_kthread+0x22c/0x3b0 kernel/rcu/tree.c:1799
> kthread+0x1b5/0x200 kernel/kthread.c:255
> <snip>
>
> read to 0xffff989dbdbe98e0 of 4 bytes by task 154 on cpu 7:
> rcu_nmi_enter_common kernel/rcu/tree.c:828 [inline]
> rcu_irq_enter+0xda/0x240 kernel/rcu/tree.c:870
> irq_enter+0x5/0x50 kernel/softirq.c:347
> <snip>
>
> Reported by Kernel Concurrency Sanitizer on:
> CPU: 7 PID: 154 Comm: kworker/7:1H Not tainted 5.3.0+ #5
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
> Workqueue: kblockd blk_mq_run_work_fn
> ==================================================================
>
> Signed-off-by: Marco Elver <[email protected]>

Good catch, queued for review and testing.

Thanx, Paul

> <snip>

2019-10-14 23:03:08

by Joel Fernandes

Subject: Re: [PATCH] rcu: Fix data-race due to atomic_t copy-by-value

On Wed, Oct 09, 2019 at 05:57:43PM +0200, Marco Elver wrote:
> This fixes a data-race where `atomic_t dynticks` is copied by value. The
> copy is performed non-atomically, resulting in a data-race if `dynticks`
> is updated concurrently.
>
> This data-race was found with KCSAN:
> ==================================================================
> BUG: KCSAN: data-race in dyntick_save_progress_counter / rcu_irq_enter
>
> write to 0xffff989dbdbe98e0 of 4 bytes by task 10 on cpu 3:
> atomic_add_return include/asm-generic/atomic-instrumented.h:78 [inline]
> rcu_dynticks_snap kernel/rcu/tree.c:310 [inline]
> dyntick_save_progress_counter+0x43/0x1b0 kernel/rcu/tree.c:984
> force_qs_rnp+0x183/0x200 kernel/rcu/tree.c:2286
> rcu_gp_fqs kernel/rcu/tree.c:1601 [inline]
> rcu_gp_fqs_loop+0x71/0x880 kernel/rcu/tree.c:1653
> rcu_gp_kthread+0x22c/0x3b0 kernel/rcu/tree.c:1799
> kthread+0x1b5/0x200 kernel/kthread.c:255
> <snip>
>
> read to 0xffff989dbdbe98e0 of 4 bytes by task 154 on cpu 7:
> rcu_nmi_enter_common kernel/rcu/tree.c:828 [inline]
> rcu_irq_enter+0xda/0x240 kernel/rcu/tree.c:870
> irq_enter+0x5/0x50 kernel/softirq.c:347
> <snip>
>
> Reported by Kernel Concurrency Sanitizer on:
> CPU: 7 PID: 154 Comm: kworker/7:1H Not tainted 5.3.0+ #5
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
> Workqueue: kblockd blk_mq_run_work_fn
> ==================================================================
>
> Signed-off-by: Marco Elver <[email protected]>

I believe Paul has already queued this, but anyway I have reviewed it as well
and it LGTM.

Reviewed-by: Joel Fernandes (Google) <[email protected]>

thanks,

- Joel


> <snip>

2019-10-14 23:52:10

by Marco Elver

Subject: Re: [PATCH] rcu: Fix data-race due to atomic_t copy-by-value

On Mon, 14 Oct 2019 at 20:00, Joel Fernandes <[email protected]> wrote:
>
> On Wed, Oct 09, 2019 at 05:57:43PM +0200, Marco Elver wrote:
> > This fixes a data-race where `atomic_t dynticks` is copied by value. The
> > copy is performed non-atomically, resulting in a data-race if `dynticks`
> > is updated concurrently.
> >
> > This data-race was found with KCSAN:
> > ==================================================================
> > BUG: KCSAN: data-race in dyntick_save_progress_counter / rcu_irq_enter
> >
> > write to 0xffff989dbdbe98e0 of 4 bytes by task 10 on cpu 3:
> > atomic_add_return include/asm-generic/atomic-instrumented.h:78 [inline]
> > rcu_dynticks_snap kernel/rcu/tree.c:310 [inline]
> > dyntick_save_progress_counter+0x43/0x1b0 kernel/rcu/tree.c:984
> > force_qs_rnp+0x183/0x200 kernel/rcu/tree.c:2286
> > rcu_gp_fqs kernel/rcu/tree.c:1601 [inline]
> > rcu_gp_fqs_loop+0x71/0x880 kernel/rcu/tree.c:1653
> > rcu_gp_kthread+0x22c/0x3b0 kernel/rcu/tree.c:1799
> > kthread+0x1b5/0x200 kernel/kthread.c:255
> > <snip>
> >
> > read to 0xffff989dbdbe98e0 of 4 bytes by task 154 on cpu 7:
> > rcu_nmi_enter_common kernel/rcu/tree.c:828 [inline]
> > rcu_irq_enter+0xda/0x240 kernel/rcu/tree.c:870
> > irq_enter+0x5/0x50 kernel/softirq.c:347
> > <snip>
> >
> > Reported by Kernel Concurrency Sanitizer on:
> > CPU: 7 PID: 154 Comm: kworker/7:1H Not tainted 5.3.0+ #5
> > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
> > Workqueue: kblockd blk_mq_run_work_fn
> > ==================================================================
> >
> > Signed-off-by: Marco Elver <[email protected]>
>
> I believe Paul has already queued this, but anyway I have reviewed it as well
> and it LGTM.
>
> Reviewed-by: Joel Fernandes (Google) <[email protected]>

Thank you! As an aside, I was wondering if there is a way to disallow
copy-by-value for certain structs such as atomic_t, given that C has no
constructors. That would be useful here, since the type system could then
prevent similar cases in the future.
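
The only C-level trick I can think of is hiding the struct definition
behind an incomplete type, so that any by-value use fails to compile.
That obviously would not work for atomic_t, which has to be embeddable
in other structs, so take it only as a compile-time sketch (the
opaque_counter and trace_snapshot names are invented for illustration):

/* "Header" part: users only ever see the forward declaration. */
struct opaque_counter;				/* incomplete type */

int opaque_read(const struct opaque_counter *v);	/* accessors take pointers */
void opaque_inc(struct opaque_counter *v);

/*
 * static void broken(struct opaque_counter snap) { }
 *
 * does not compile ("parameter has incomplete type"), and neither does
 * 'struct opaque_counter copy = *v;', so every access is forced through
 * the pointer-based API.
 */

void trace_snapshot(int snap);			/* consumers take the plain int */

void example(struct opaque_counter *v)
{
	trace_snapshot(opaque_read(v));		/* snapshot via the accessor */
}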

Thanks,
-- Marco

> <snip>

2019-10-15 05:35:03

by Paul E. McKenney

Subject: Re: [PATCH] rcu: Fix data-race due to atomic_t copy-by-value

On Mon, Oct 14, 2019 at 02:00:39PM -0400, Joel Fernandes wrote:
> On Wed, Oct 09, 2019 at 05:57:43PM +0200, Marco Elver wrote:
> > This fixes a data-race where `atomic_t dynticks` is copied by value. The
> > copy is performed non-atomically, resulting in a data-race if `dynticks`
> > is updated concurrently.
> >
> > This data-race was found with KCSAN:
> > ==================================================================
> > BUG: KCSAN: data-race in dyntick_save_progress_counter / rcu_irq_enter
> >
> > write to 0xffff989dbdbe98e0 of 4 bytes by task 10 on cpu 3:
> > atomic_add_return include/asm-generic/atomic-instrumented.h:78 [inline]
> > rcu_dynticks_snap kernel/rcu/tree.c:310 [inline]
> > dyntick_save_progress_counter+0x43/0x1b0 kernel/rcu/tree.c:984
> > force_qs_rnp+0x183/0x200 kernel/rcu/tree.c:2286
> > rcu_gp_fqs kernel/rcu/tree.c:1601 [inline]
> > rcu_gp_fqs_loop+0x71/0x880 kernel/rcu/tree.c:1653
> > rcu_gp_kthread+0x22c/0x3b0 kernel/rcu/tree.c:1799
> > kthread+0x1b5/0x200 kernel/kthread.c:255
> > <snip>
> >
> > read to 0xffff989dbdbe98e0 of 4 bytes by task 154 on cpu 7:
> > rcu_nmi_enter_common kernel/rcu/tree.c:828 [inline]
> > rcu_irq_enter+0xda/0x240 kernel/rcu/tree.c:870
> > irq_enter+0x5/0x50 kernel/softirq.c:347
> > <snip>
> >
> > Reported by Kernel Concurrency Sanitizer on:
> > CPU: 7 PID: 154 Comm: kworker/7:1H Not tainted 5.3.0+ #5
> > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
> > Workqueue: kblockd blk_mq_run_work_fn
> > ==================================================================
> >
> > Signed-off-by: Marco Elver <[email protected]>
>
> I believe Paul has already queued this, but anyway I have reviewed it as well
> and it LGTM.

It is still in the development portion, so it is still mutable. ;-)

> Reviewed-by: Joel Fernandes (Google) <[email protected]>

Applied, thank you both!

Thanx, Paul

> <snip>