2020-09-04 15:33:21

by Ahmed S. Darwish

Subject: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

Hi,

Changelog-v2
============

- Standardize on seqcount_LOCKNAME_t as the canonical reference for
sequence counters with associated locks, instead of v1
seqcount_LOCKTYPE_t.

- Use unique prefix "seqprop_*" for all seqcount_t/seqcount_LOCKNAME_t
property accessors.

- Touch up the lock-unlock rationale for more clarity. Enforce writer
non-preemptibility using "__seq_enforce_writer_non_preemptibility()".

Cover letter (v1)
=================

https://lkml.kernel.org/r/[email protected]

Preemption must be disabled before entering a sequence counter write
side critical section. Otherwise the read side section can preempt the
write side section and spin for the entire scheduler tick. If that
reader belongs to a real-time scheduling class, it can spin forever and
the kernel will livelock.

Disabling preemption cannot be done for PREEMPT_RT though: it can lead
to higher latencies, and the write side sections will not be able to
acquire locks which become sleeping locks (e.g. spinlock_t).

To remain preemptible, while avoiding a possible livelock caused by the
reader preempting the writer, use a different technique: let the reader
detect if a seqcount_LOCKNAME_t writer is in progress. If that's the
case, acquire then release the associated LOCKNAME writer serialization
lock. This will allow any possibly-preempted writer to make progress
until the end of its writer serialization lock critical section.

Implement this lock-unlock technique for all seqcount_LOCKNAME_t with
an associated (PREEMPT_RT) sleeping lock, and for seqlock_t.
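
For illustration, the PREEMPT_RT reader fast path then looks roughly
like the sketch below (simplified from patch 4/5, shown for the
seqcount_spinlock_t case; lockdep and the __SEQ_LOCK() wrappers are
omitted):

  unsigned seq = READ_ONCE(s->seqcount.sequence);

  if (IS_ENABLED(CONFIG_PREEMPT_RT) && unlikely(seq & 1)) {
          /*
           * A writer is in progress. Acquiring then releasing the
           * associated lock sleeps until the (possibly preempted)
           * writer leaves its lock critical section.
           */
          spin_lock(s->lock);
          spin_unlock(s->lock);

          /* The writer made progress; re-read the sequence counter. */
          seq = READ_ONCE(s->seqcount.sequence);
  }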

8<--------------

Ahmed S. Darwish (5):
seqlock: seqcount_LOCKNAME_t: Standardize naming convention
seqlock: Use unique prefix for seqcount_t property accessors
seqlock: seqcount_t: Implement all read APIs as statement expressions
seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT support
seqlock: PREEMPT_RT: Do not starve seqlock_t writers

include/linux/seqlock.h | 281 ++++++++++++++++++++++++----------------
1 file changed, 167 insertions(+), 114 deletions(-)

base-commit: f75aef392f869018f78cfedf3c320a6b3fcfda6b
--
2.28.0


2020-09-04 15:33:32

by Ahmed S. Darwish

Subject: [PATCH v2 1/5] seqlock: seqcount_LOCKNAME_t: Standardize naming convention

At seqlock.h, sequence counters with associated locks are either called
seqcount_LOCKNAME_t, seqcount_LOCKTYPE_t, or seqcount_locktype_t.

Standardize on seqcount_LOCKNAME_t for all instances in comments,
kernel-doc, and SEQCOUNT_LOCKNAME() generative macro parameters.
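
For orientation, a seqcount_LOCKNAME_t gets its lock associated at
initialization time. A sketch for the spinlock case (the "foo_*" names
are made up):

  static spinlock_t          foo_lock = __SPIN_LOCK_UNLOCKED(foo_lock);
  static seqcount_spinlock_t foo_seq  =
          SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);

  /* or, at runtime: */
  spin_lock_init(&foo_lock);
  seqcount_spinlock_init(&foo_seq, &foo_lock);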

Signed-off-by: Ahmed S. Darwish <[email protected]>
---
include/linux/seqlock.h | 83 +++++++++++++++++++++--------------------
1 file changed, 42 insertions(+), 41 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 962d9768945f..4f219df659b1 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -53,7 +53,7 @@
*
* If the write serialization mechanism is one of the common kernel
* locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKTYPE_t) instead.
+ * (seqcount_LOCKNAME_t) instead.
*
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
@@ -117,7 +117,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

/*
- * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
*
* A sequence counter which associates the lock used for writer
* serialization at initialization time. This enables lockdep to validate
@@ -138,30 +138,32 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#endif

/**
- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
- * @lock: Pointer to the associated spinlock
+ * @lock: Pointer to the associated lock
*
- * A plain sequence counter with external writer synchronization by a
- * spinlock. The spinlock is associated to the sequence count in the
+ * A plain sequence counter with external writer synchronization by
+ * LOCKNAME @lock. The lock is associated to the sequence counter in the
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
+ *
+ * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
*/

/**
* seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
* @s: Pointer to the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated lock
*/

/*
- * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
- * @locktype: actual typename
- * @lockname: name
- * @preemptible: preemptibility of above locktype
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+ * @preemptible: preemptibility of above lockname
* @lockmember: argument for lockdep_assert_held()
*/
-#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -211,29 +213,28 @@ static inline void __seqcount_assert(seqcount_t *s)
lockdep_assert_preemption_disabled();
}

-SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
-SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock)
+SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)

/**
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
* @name: Name of the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated LOCKNAME
*/

-#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
__SEQ_LOCK(.lock = (assoc_lock)) \
}

-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
@@ -252,7 +253,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)

/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -283,7 +284,7 @@ static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
@@ -299,7 +300,7 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
@@ -314,7 +315,7 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
@@ -337,7 +338,7 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -365,7 +366,7 @@ static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -389,7 +390,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)

/**
* read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
@@ -409,7 +410,7 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)

/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*/
#define raw_write_seqcount_begin(s) \
do { \
@@ -428,7 +429,7 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)

/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*/
#define raw_write_seqcount_end(s) \
do { \
@@ -448,7 +449,7 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
@@ -471,7 +472,7 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)

/**
* write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* write_seqcount_begin opens a write side critical section of the given
* seqcount_t.
@@ -497,7 +498,7 @@ static inline void write_seqcount_t_begin(seqcount_t *s)

/**
* write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* The write section must've been opened with write_seqcount_begin().
*/
@@ -517,7 +518,7 @@ static inline void write_seqcount_t_end(seqcount_t *s)

/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -571,7 +572,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
@@ -589,7 +590,7 @@ static inline void write_seqcount_t_invalidate(seqcount_t *s)

/**
* raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Use seqcount_t latching to switch between two storage places protected
* by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -614,7 +615,7 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s)

/**
* raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
--
2.28.0

2020-09-04 15:33:50

by Ahmed S. Darwish

Subject: [PATCH v2 3/5] seqlock: seqcount_t: Implement all read APIs as statement expressions

The sequence counter read APIs are implemented as CPP macros, so they
can take either seqcount_t or any of the seqcount_LOCKNAME_t variants.
Such macros then get *directly* transformed to internal C functions that
only take plain seqcount_t.

Further commits need access to seqcount_LOCKNAME_t inside of the actual
read APIs code. Thus transform all of the seqcount read APIs to pure GCC
statement expressions instead.

This will not break type-safety: all of the transformed APIs resolve to
a _Generic() selection that does not have a "default" case.

This will also not affect the transformed APIs' readability: the
previously added kernel-doc above all seqlock.h functions makes the
expectations quite clear for call-site developers.
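
Call sites are thus unaffected: a statement expression yields a value
exactly like the old inline functions did. A sketch, with a made-up
seqcount_spinlock_t "foo_seq" protecting a made-up "foo_data":

  unsigned seq;
  int snapshot;

  do {
          seq = read_seqcount_begin(&foo_seq);  /* statement expression */
          snapshot = foo_data;
  } while (read_seqcount_retry(&foo_seq, seq));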

Signed-off-by: Ahmed S. Darwish <[email protected]>
---
include/linux/seqlock.h | 94 ++++++++++++++++++++---------------------
1 file changed, 45 insertions(+), 49 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 96198da7debc..ed1c6c0ff8bb 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -184,6 +184,12 @@ __seqprop_seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
+__seqprop_seqcount_##lockname##_sequence(const seqcount_##lockname##_t *s)\
+{ \
+ return READ_ONCE(s->seqcount.sequence); \
+} \
+ \
static __always_inline bool \
__seqprop_seqcount_##lockname##_preemptible(const seqcount_##lockname##_t *s)\
{ \
@@ -205,6 +211,11 @@ static inline seqcount_t *__seqprop_seqcount_ptr(seqcount_t *s)
return s;
}

+static inline unsigned __seqprop_seqcount_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
static inline bool __seqprop_seqcount_preemptible(const seqcount_t *s)
{
return false;
@@ -250,6 +261,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
__seqprop_case((s), ww_mutex, prop))

#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_sequence(s) __seqprop(s, sequence)
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)

@@ -268,21 +280,15 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
- __read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq; \
+ \
+ while ((seq = __seqcount_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
@@ -291,14 +297,12 @@ static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
- raw_read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_t_begin(s);
- smp_rmb();
- return ret;
-}
+({ \
+ unsigned seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ seq; \
+})

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -307,13 +311,10 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
- read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_t_begin(s);
-}
+({ \
+ seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
@@ -327,15 +328,13 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
- raw_read_seqcount_t(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
@@ -355,16 +354,13 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
- raw_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
-{
- /*
- * If the counter is odd, let read_seqcount_retry() fail
- * by decrementing the counter.
- */
- return raw_read_seqcount_t(s) & ~1;
-}
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
--
2.28.0

2020-09-04 15:34:46

by Ahmed S. Darwish

Subject: [PATCH v2 4/5] seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT support

Preemption must be disabled before entering a sequence counter write
side critical section. Otherwise the read side section can preempt the
write side section and spin for the entire scheduler tick. If that
reader belongs to a real-time scheduling class, it can spin forever and
the kernel will livelock.

Disabling preemption cannot be done for PREEMPT_RT though: it can lead
to higher latencies, and the write side sections will not be able to
acquire locks which become sleeping locks (e.g. spinlock_t).

To remain preemptible, while avoiding a possible livelock caused by the
reader preempting the writer, use a different technique: let the reader
detect if a seqcount_LOCKNAME_t writer is in progress. If that's the
case, acquire then release the associated LOCKNAME writer serialization
lock. This will allow any possibly-preempted writer to make progress
until the end of its writer serialization lock critical section.

Implement this lock-unlock technique for all seqcount_LOCKNAME_t with
an associated (PREEMPT_RT) sleeping lock.
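
For the seqcount_spinlock_t instantiation below, the generated sequence
property accessor then roughly expands to the following (a sketch; the
__SEQ_LOCK() wrappers are dropped):

  static __always_inline unsigned
  __seqprop_seqcount_spinlock_sequence(const seqcount_spinlock_t *s)
  {
          unsigned seq = READ_ONCE(s->seqcount.sequence);

          if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                  return seq;

          /* "preemptible" is __SEQ_RT == IS_ENABLED(CONFIG_PREEMPT_RT) */
          if (unlikely(seq & 1)) {
                  spin_lock(s->lock);
                  spin_unlock(s->lock);

                  /*
                   * Re-read the sequence counter since the (possibly
                   * preempted) writer made progress.
                   */
                  seq = READ_ONCE(s->seqcount.sequence);
          }

          return seq;
  }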

Link: https://lkml.kernel.org/r/159708609435.2571.13948681727529247231.tglx@nanos
Link: https://lkml.kernel.org/r/[email protected]
References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
Signed-off-by: Ahmed S. Darwish <[email protected]>
---
include/linux/seqlock.h | 72 +++++++++++++++++++++++++++++++++--------
1 file changed, 58 insertions(+), 14 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index ed1c6c0ff8bb..6ac5a63fc536 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -17,6 +17,7 @@
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

@@ -131,7 +132,23 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* See Documentation/locking/seqlock.rst
*/

-#ifdef CONFIG_LOCKDEP
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr) expr
#else
#define __SEQ_LOCK(expr)
@@ -162,10 +179,12 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
- * @preemptible: preemptibility of above lockname
+ * @preemptible: preemptibility of above locktype
* @lockmember: argument for lockdep_assert_held()
+ * @lockbase: associated lock release function (prefix only)
+ * @lock_acquire: associated lock acquisition function (full call)
*/
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -187,7 +206,23 @@ __seqprop_seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
static __always_inline unsigned \
__seqprop_seqcount_##lockname##_sequence(const seqcount_##lockname##_t *s)\
{ \
- return READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return seq; \
+ \
+ if (preemptible && unlikely(seq & 1)) { \
+ __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_unlock(s->lock)); \
+ \
+ /* \
+ * Re-read the sequence counter since the (possibly \
+ * preempted) writer made progress. \
+ */ \
+ seq = READ_ONCE(s->seqcount.sequence); \
+ } \
+ \
+ return seq; \
} \
\
static __always_inline bool \
@@ -226,11 +261,13 @@ static inline void __seqprop_seqcount_assert(const seqcount_t *s)
lockdep_assert_preemption_disabled();
}

-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock)
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, false, s->lock)
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, false, s->lock)
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock)
-SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
+#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))

/**
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -406,13 +443,20 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
return __read_seqcount_t_retry(s, start);
}

+/*
+ * Enforce non-preemptibility for all seqcount_LOCKNAME_t writers. Don't
+ * do it for PREEMPT_RT, for the reasons outlined at __SEQ_LOCK().
+ */
+#define __seq_enforce_writer_non_preemptibility(s) \
+ (!IS_ENABLED(CONFIG_PREEMPT_RT) && __seqcount_lock_preemptible(s))
+
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*/
#define raw_write_seqcount_begin(s) \
do { \
- if (__seqcount_lock_preemptible(s)) \
+ if (__seq_enforce_writer_non_preemptibility(s)) \
preempt_disable(); \
\
raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
@@ -433,7 +477,7 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
do { \
raw_write_seqcount_t_end(__seqcount_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (__seq_enforce_writer_non_preemptibility(s)) \
preempt_enable(); \
} while (0)

@@ -456,7 +500,7 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
do { \
__seqcount_assert_lock_held(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (__seq_enforce_writer_non_preemptibility(s)) \
preempt_disable(); \
\
write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
@@ -483,7 +527,7 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
do { \
__seqcount_assert_lock_held(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (__seq_enforce_writer_non_preemptibility(s)) \
preempt_disable(); \
\
write_seqcount_t_begin(__seqcount_ptr(s)); \
@@ -504,7 +548,7 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
do { \
write_seqcount_t_end(__seqcount_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (__seq_enforce_writer_non_preemptibility(s)) \
preempt_enable(); \
} while (0)

--
2.28.0

2020-09-04 15:35:20

by Ahmed S. Darwish

Subject: [PATCH v2 5/5] seqlock: PREEMPT_RT: Do not starve seqlock_t writers

On PREEMPT_RT, seqlock_t is transformed to a sleeping lock that does
not disable preemption. A seqlock_t reader can thus preempt its write
side section and spin for the entire scheduler tick. If that reader
belongs to a real-time scheduling class, it can spin forever and the
kernel will livelock.

To break this livelock possibility on PREEMPT_RT, implement seqlock_t in
terms of "seqcount_spinlock_t" instead of plain "seqcount_t".

Besides its pure annotational value, this will leverage the existing
seqcount_LOCKNAME_t PREEMPT_RT anti-livelock mechanisms, without adding
any extra code.
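
seqlock_t usage itself is unchanged. A sketch, with made-up "foo_*"
names:

  static DEFINE_SEQLOCK(foo_lock);
  static int foo_data;

  /* writer: the embedded spinlock serializes writers */
  write_seqlock(&foo_lock);
  foo_data++;
  write_sequnlock(&foo_lock);

  /* reader: on PREEMPT_RT, this can no longer starve the writer */
  unsigned seq;
  int snapshot;

  do {
          seq = read_seqbegin(&foo_lock);
          snapshot = foo_data;
  } while (read_seqretry(&foo_lock, seq));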

Signed-off-by: Ahmed S. Darwish <[email protected]>
---
include/linux/seqlock.h | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 6ac5a63fc536..06a339355c3a 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -757,13 +757,17 @@ static inline void raw_write_seqcount_t_latch(seqcount_t *s)
* - Documentation/locking/seqlock.rst
*/
typedef struct {
- struct seqcount seqcount;
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname) \
{ \
- .seqcount = SEQCNT_ZERO(lockname), \
+ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}

@@ -773,8 +777,8 @@ typedef struct {
*/
#define seqlock_init(sl) \
do { \
- seqcount_init(&(sl)->seqcount); \
spin_lock_init(&(sl)->lock); \
+ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)

/**
@@ -821,6 +825,12 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
return read_seqcount_retry(&sl->seqcount, start);
}

+/*
+ * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
+ * instead of the generic write_seqcount_begin(). This way, no redundant
+ * lockdep_assert_held() checks are added.
+ */
+
/**
* write_seqlock() - start a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
@@ -837,7 +847,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -849,7 +859,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}

@@ -863,7 +873,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -876,7 +886,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}

@@ -890,7 +900,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -902,7 +912,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}

@@ -911,7 +921,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;

spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
return flags;
}

@@ -940,7 +950,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}

--
2.28.0

2020-09-04 15:37:12

by Ahmed S. Darwish

Subject: [PATCH v2 2/5] seqlock: Use unique prefix for seqcount_t property accessors

At seqlock.h, the following set of functions:

- __seqcount_ptr()
- __seqcount_preemptible()
- __seqcount_assert()

act as plain seqcount_t "property" accessors. Meanwhile, the following
group:

- __seqcount_ptr()
- __seqcount_lock_preemptible()
- __seqcount_assert_lock_held()

act as the equivalent set, but in the generic form, taking either
seqcount_t or any of the seqcount_LOCKNAME_t variants.

This is quite confusing, especially for the first member, which is
named exactly the same in both groups.

Differentiate the first group by using "__seqprop" as prefix, and also
use that same prefix for all of seqcount_LOCKNAME_t property accessors.

While at it, constify the property accessors' first parameter when
appropriate.
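
After the rename, the two groups are clearly separated (a sketch of the
resulting mapping):

  /* plain seqcount_t property accessors */
  __seqprop_seqcount_ptr(), __seqprop_seqcount_preemptible(),
  __seqprop_seqcount_assert()

  /* generic accessors, dispatching on the type via _Generic() */
  __seqcount_ptr(s)              => __seqprop(s, ptr)
  __seqcount_lock_preemptible(s) => __seqprop(s, preemptible)
  __seqcount_assert_lock_held(s) => __seqprop(s, assert)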

References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
Signed-off-by: Ahmed S. Darwish <[email protected]>
---
include/linux/seqlock.h | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 4f219df659b1..96198da7debc 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/

/*
- * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_seqcount_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+ *
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above lockname
@@ -177,19 +179,19 @@ seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
} \
\
static __always_inline seqcount_t * \
-__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+__seqprop_seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
static __always_inline bool \
-__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+__seqprop_seqcount_##lockname##_preemptible(const seqcount_##lockname##_t *s)\
{ \
return preemptible; \
} \
\
static __always_inline void \
-__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+__seqprop_seqcount_##lockname##_assert(const seqcount_##lockname##_t *s)\
{ \
__SEQ_LOCK(lockdep_assert_held(lockmember)); \
}
@@ -198,17 +200,17 @@ __seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_seqcount_ptr(seqcount_t *s)
{
return s;
}

-static inline bool __seqcount_preemptible(seqcount_t *s)
+static inline bool __seqprop_seqcount_preemptible(const seqcount_t *s)
{
return false;
}

-static inline void __seqcount_assert(seqcount_t *s)
+static inline void __seqprop_seqcount_assert(const seqcount_t *s)
{
lockdep_assert_preemption_disabled();
}
@@ -237,10 +239,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_seqcount_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqcount_##prop((void *)(s)), \
+ seqcount_t: __seqprop_seqcount_##prop((void *)(s)), \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
--
2.28.0

2020-09-08 12:31:49

by Peter Zijlstra

Subject: Re: [PATCH v2 2/5] seqlock: Use unique prefix for seqcount_t property accessors

On Fri, Sep 04, 2020 at 05:32:28PM +0200, Ahmed S. Darwish wrote:

> static __always_inline seqcount_t * \
> -__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
> +__seqprop_seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \

I did s/__seqprop_seqcount_/__seqprop_/g on all this.

seqprop is a contraction of seqcount property, no need to put in yet
another seqcount I feel.

2020-09-08 12:35:45

by Peter Zijlstra

Subject: Re: [PATCH v2 4/5] seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT support

On Fri, Sep 04, 2020 at 05:32:30PM +0200, Ahmed S. Darwish wrote:
> @@ -406,13 +443,20 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
> return __read_seqcount_t_retry(s, start);
> }
>
> +/*
> + * Enforce non-preemptibility for all seqcount_LOCKNAME_t writers. Don't
> + * do it for PREEMPT_RT, for the reasons outlined at __SEQ_LOCK().
> + */
> +#define __seq_enforce_writer_non_preemptibility(s) \
> + (!IS_ENABLED(CONFIG_PREEMPT_RT) && __seqcount_lock_preemptible(s))
> +
> /**
> * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
> * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
> */
> #define raw_write_seqcount_begin(s) \
> do { \
> - if (__seqcount_lock_preemptible(s)) \
> + if (__seq_enforce_writer_non_preemptibility(s)) \
> preempt_disable(); \
> \
> raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
> @@ -433,7 +477,7 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
> do { \
> raw_write_seqcount_t_end(__seqcount_ptr(s)); \
> \
> - if (__seqcount_lock_preemptible(s)) \
> + if (__seq_enforce_writer_non_preemptibility(s)) \
> preempt_enable(); \
> } while (0)
>
> @@ -456,7 +500,7 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
> do { \
> __seqcount_assert_lock_held(s); \
> \
> - if (__seqcount_lock_preemptible(s)) \
> + if (__seq_enforce_writer_non_preemptibility(s)) \
> preempt_disable(); \
> \
> write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
> @@ -483,7 +527,7 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
> do { \
> __seqcount_assert_lock_held(s); \
> \
> - if (__seqcount_lock_preemptible(s)) \
> + if (__seq_enforce_writer_non_preemptibility(s)) \
> preempt_disable(); \
> \
> write_seqcount_t_begin(__seqcount_ptr(s)); \
> @@ -504,7 +548,7 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
> do { \
> write_seqcount_t_end(__seqcount_ptr(s)); \
> \
> - if (__seqcount_lock_preemptible(s)) \
> + if (__seq_enforce_writer_non_preemptibility(s)) \
> preempt_enable(); \
> } while (0)

I've replaced the above with the below, afaict there were no users of
__seqcount_lock_preemptible() left.

--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -228,7 +228,11 @@ __seqprop_##lockname##_sequence(const se
static __always_inline bool \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
- return preemptible; \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return preemptible; \
+ \
+ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
+ return false; \
} \
\
static __always_inline void \

2020-09-08 17:45:05

by Ahmed S. Darwish

Subject: Re: [PATCH v2 4/5] seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT support

On Tue, Sep 08, 2020 at 01:45:20PM +0200, [email protected] wrote:
> On Fri, Sep 04, 2020 at 05:32:30PM +0200, Ahmed S. Darwish wrote:
> > @@ -406,13 +443,20 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
> > return __read_seqcount_t_retry(s, start);
> > }
> >
> > +/*
> > + * Enforce non-preemptibility for all seqcount_LOCKNAME_t writers. Don't
> > + * do it for PREEMPT_RT, for the reasons outlined at __SEQ_LOCK().
> > + */
> > +#define __seq_enforce_writer_non_preemptibility(s) \
> > + (!IS_ENABLED(CONFIG_PREEMPT_RT) && __seqcount_lock_preemptible(s))
> > +
> > /**
> > * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
> > * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
> > */
> > #define raw_write_seqcount_begin(s) \
> > do { \
> > - if (__seqcount_lock_preemptible(s)) \
> > + if (__seq_enforce_writer_non_preemptibility(s)) \
> > preempt_disable(); \
> > \
> > raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
> > @@ -433,7 +477,7 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
> > do { \
> > raw_write_seqcount_t_end(__seqcount_ptr(s)); \
> > \
> > - if (__seqcount_lock_preemptible(s)) \
> > + if (__seq_enforce_writer_non_preemptibility(s)) \
> > preempt_enable(); \
> > } while (0)
> >
> > @@ -456,7 +500,7 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
> > do { \
> > __seqcount_assert_lock_held(s); \
> > \
> > - if (__seqcount_lock_preemptible(s)) \
> > + if (__seq_enforce_writer_non_preemptibility(s)) \
> > preempt_disable(); \
> > \
> > write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
> > @@ -483,7 +527,7 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
> > do { \
> > __seqcount_assert_lock_held(s); \
> > \
> > - if (__seqcount_lock_preemptible(s)) \
> > + if (__seq_enforce_writer_non_preemptibility(s)) \
> > preempt_disable(); \
> > \
> > write_seqcount_t_begin(__seqcount_ptr(s)); \
> > @@ -504,7 +548,7 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
> > do { \
> > write_seqcount_t_end(__seqcount_ptr(s)); \
> > \
> > - if (__seqcount_lock_preemptible(s)) \
> > + if (__seq_enforce_writer_non_preemptibility(s)) \
> > preempt_enable(); \
> > } while (0)
>
> I've replaced the above with the below, afaict there were no users of
> __seqcount_lock_preemptible() left.
>
> --- a/include/linux/seqlock.h
> +++ b/include/linux/seqlock.h
> @@ -228,7 +228,11 @@ __seqprop_##lockname##_sequence(const se
> static __always_inline bool \
> __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
> { \
> - return preemptible; \
> + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
> + return preemptible; \
> + \
> + /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
> + return false; \
> } \
> \

Sounds good.
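
For reference, given the instantiations in patch 4/5, the writer-side
preempt_disable() decision then works out as follows (a sketch):

  /*
   *                           !PREEMPT_RT    PREEMPT_RT
   * seqcount_raw_spinlock_t   false          false
   * seqcount_spinlock_t       false          false
   * seqcount_rwlock_t         false          false
   * seqcount_mutex_t          true           false
   * seqcount_ww_mutex_t       true           false
   *
   * On PREEMPT_RT the write side stays preemptible; readers rely on
   * the LOCK+UNLOCK technique above instead.
   */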

Subject: [tip: locking/core] seqlock: Use unique prefix for seqcount_t property accessors

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 5cdd25572a29e46f932d3e6eedbd07429de66431
Gitweb: https://git.kernel.org/tip/5cdd25572a29e46f932d3e6eedbd07429de66431
Author: Ahmed S. Darwish <[email protected]>
AuthorDate: Fri, 04 Sep 2020 17:32:28 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Thu, 10 Sep 2020 11:19:30 +02:00

seqlock: Use unique prefix for seqcount_t property accessors

At seqlock.h, the following set of functions:

- __seqcount_ptr()
- __seqcount_preemptible()
- __seqcount_assert()

act as plain seqcount_t "property" accessors. Meanwhile, the following
group:

- __seqcount_ptr()
- __seqcount_lock_preemptible()
- __seqcount_assert_lock_held()

act as the equivalent set, but in the generic form, taking either
seqcount_t or any of the seqcount_LOCKNAME_t variants.

This is quite confusing, especially for the first member, which is
named exactly the same in both groups.

Differentiate the first group by using "__seqprop" as prefix, and also
use that same prefix for all of seqcount_LOCKNAME_t property accessors.

While at it, constify the property accessors' first parameter when
appropriate.

References: 55f3560df975 ("seqlock: Extend seqcount API with associated locks")
Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 820ace2..0b4a22f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/

/*
- * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+ *
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above lockname
@@ -177,19 +179,19 @@ seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
} \
\
static __always_inline seqcount_t * \
-__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
static __always_inline bool \
-__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
return preemptible; \
} \
\
static __always_inline void \
-__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
__SEQ_LOCK(lockdep_assert_held(lockmember)); \
}
@@ -198,17 +200,17 @@ __seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
return s;
}

-static inline bool __seqcount_preemptible(seqcount_t *s)
+static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
}

-static inline void __seqcount_assert(seqcount_t *s)
+static inline void __seqprop_assert(const seqcount_t *s)
{
lockdep_assert_preemption_disabled();
}
@@ -237,10 +239,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqcount_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop((void *)(s)), \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \

Subject: [tip: locking/core] seqlock: PREEMPT_RT: Do not starve seqlock_t writers

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 1909760f5fc3f123e47b4e24e0ccdc0fc8f3f106
Gitweb: https://git.kernel.org/tip/1909760f5fc3f123e47b4e24e0ccdc0fc8f3f106
Author: Ahmed S. Darwish <[email protected]>
AuthorDate: Fri, 04 Sep 2020 17:32:31 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Thu, 10 Sep 2020 11:19:31 +02:00

seqlock: PREEMPT_RT: Do not starve seqlock_t writers

On PREEMPT_RT, seqlock_t is transformed to a sleeping lock that does
not disable preemption. A seqlock_t reader can thus preempt its write
side section and spin for the entire scheduler tick. If that reader
belongs to a real-time scheduling class, it can spin forever and the
kernel will livelock.

To break this livelock possibility on PREEMPT_RT, implement seqlock_t in
terms of "seqcount_spinlock_t" instead of plain "seqcount_t".

Besides its pure annotational value, this will leverage the existing
seqcount_LOCKNAME_t PREEMPT_RT anti-livelock mechanisms, without adding
any extra code.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 2bc9510..f73c7eb 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -790,13 +790,17 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
* - Documentation/locking/seqlock.rst
*/
typedef struct {
- struct seqcount seqcount;
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname) \
{ \
- .seqcount = SEQCNT_ZERO(lockname), \
+ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}

@@ -806,8 +810,8 @@ typedef struct {
*/
#define seqlock_init(sl) \
do { \
- seqcount_init(&(sl)->seqcount); \
spin_lock_init(&(sl)->lock); \
+ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)

/**
@@ -854,6 +858,12 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
return read_seqcount_retry(&sl->seqcount, start);
}

+/*
+ * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
+ * instead of the generic write_seqcount_begin(). This way, no redundant
+ * lockdep_assert_held() checks are added.
+ */
+
/**
* write_seqlock() - start a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
@@ -870,7 +880,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -882,7 +892,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}

@@ -896,7 +906,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -909,7 +919,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}

@@ -923,7 +933,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
@@ -935,7 +945,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}

@@ -944,7 +954,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;

spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_t_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount.seqcount);
return flags;
}

@@ -973,7 +983,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_t_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}

Subject: [tip: locking/core] seqlock: seqcount_LOCKNAME_t: Standardize naming convention

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 6dd699b13d53f26a7603702d8bada3482312df74
Gitweb: https://git.kernel.org/tip/6dd699b13d53f26a7603702d8bada3482312df74
Author: Ahmed S. Darwish <[email protected]>
AuthorDate: Fri, 04 Sep 2020 17:32:27 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Thu, 10 Sep 2020 11:19:30 +02:00

seqlock: seqcount_LOCKNAME_t: Standardize naming convention

At seqlock.h, sequence counters with associated locks are either called
seqcount_LOCKNAME_t, seqcount_LOCKTYPE_t, or seqcount_locktype_t.

Standardize on seqcount_LOCKNAME_t for all instances in comments,
kernel-doc, and SEQCOUNT_LOCKNAME() generative macro parameters.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 79 ++++++++++++++++++++--------------------
1 file changed, 40 insertions(+), 39 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f2a7a46..820ace2 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -53,7 +53,7 @@
*
* If the write serialization mechanism is one of the common kernel
* locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKTYPE_t) instead.
+ * (seqcount_LOCKNAME_t) instead.
*
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
@@ -117,7 +117,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

/*
- * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
*
* A sequence counter which associates the lock used for writer
* serialization at initialization time. This enables lockdep to validate
@@ -138,30 +138,32 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#endif

/**
- * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
- * @lock: Pointer to the associated spinlock
+ * @lock: Pointer to the associated lock
*
- * A plain sequence counter with external writer synchronization by a
- * spinlock. The spinlock is associated to the sequence count in the
+ * A plain sequence counter with external writer synchronization by
+ * LOCKNAME @lock. The lock is associated to the sequence counter in the
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
+ *
+ * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
*/

/*
* seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
* @s: Pointer to the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated lock
*/

/*
- * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
- * @locktype: actual typename
- * @lockname: name
- * @preemptible: preemptibility of above locktype
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+ * @preemptible: preemptibility of above lockname
* @lockmember: argument for lockdep_assert_held()
*/
-#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -211,29 +213,28 @@ static inline void __seqcount_assert(seqcount_t *s)
lockdep_assert_preemption_disabled();
}

-SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
-SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
-SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, false, s->lock)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock)
+SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)

/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
* @name: Name of the seqcount_LOCKNAME_t instance
- * @lock: Pointer to the associated LOCKTYPE
+ * @lock: Pointer to the associated LOCKNAME
*/

-#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
__SEQ_LOCK(.lock = (assoc_lock)) \
}

-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
-
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
@@ -252,7 +253,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)

/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -283,7 +284,7 @@ repeat:

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
@@ -299,7 +300,7 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
@@ -314,7 +315,7 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
@@ -337,7 +338,7 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -365,7 +366,7 @@ static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -389,7 +390,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)

/**
* read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
@@ -409,7 +410,7 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)

/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*/
#define raw_write_seqcount_begin(s) \
do { \
@@ -428,7 +429,7 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)

/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*/
#define raw_write_seqcount_end(s) \
do { \
@@ -448,7 +449,7 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
@@ -471,7 +472,7 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)

/**
* write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* write_seqcount_begin opens a write side critical section of the given
* seqcount_t.
@@ -497,7 +498,7 @@ static inline void write_seqcount_t_begin(seqcount_t *s)

/**
* write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* The write section must've been opened with write_seqcount_begin().
*/
@@ -517,7 +518,7 @@ static inline void write_seqcount_t_end(seqcount_t *s)

/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -571,7 +572,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
- * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
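
For reference, an abbreviated sketch (illustration only, not the literal
preprocessor output) of what the SEQCOUNT_LOCKNAME() generative macro
above produces for the spinlock case; the real expansion also emits the
accessor functions shown in the surrounding hunks, and the lock member
is only compiled in when __SEQ_LOCK() expands to its argument (i.e. on
configurations that track the associated lock, such as lockdep):

typedef struct seqcount_spinlock {
	seqcount_t	seqcount;
	spinlock_t	*lock;	/* via __SEQ_LOCK(), config-dependent */
} seqcount_spinlock_t;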

Subject: [tip: locking/core] seqlock: seqcount_t: Implement all read APIs as statement expressions

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 52ac39e5db5148f70392edb654ad882ac8da88a8
Gitweb: https://git.kernel.org/tip/52ac39e5db5148f70392edb654ad882ac8da88a8
Author: Ahmed S. Darwish <[email protected]>
AuthorDate: Fri, 04 Sep 2020 17:32:29 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Thu, 10 Sep 2020 11:19:31 +02:00

seqlock: seqcount_t: Implement all read APIs as statement expressions

The sequence counters read APIs are implemented as CPP macros, so they
can take either seqcount_t or any of the seqcount_LOCKNAME_t variants.
Such macros then get *directly* transformed to internal C functions that
only take plain seqcount_t.

Further commits need access to seqcount_LOCKNAME_t inside of the actual
read APIs code. Thus transform all of the seqcount read APIs to pure GCC
statement expressions instead.
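
As background, a minimal standalone sketch (generic GNU C, not from this
patch) of a statement expression: a brace-enclosed block inside
parentheses whose last expression supplies the value, which lets a macro
declare local variables while still being usable as an rvalue:

#define double_it(x)			\
({					\
	typeof(x) __val = (x);		\
	__val * 2;			\
})

/* usable where an expression is expected: int y = double_it(21); */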

This will not break type-safety: all of the transformed APIs resolve to
a _Generic() selection that does not have a "default" case.
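
A toy demonstration (plain C11, not kernel code) of that property: a
_Generic() selection with no "default" association rejects, at compile
time, any argument type that is not explicitly listed:

#include <stdio.h>

#define type_name(x) _Generic((x), int: "int", double: "double")

int main(void)
{
	printf("%s\n", type_name(1));	/* prints "int" */
	printf("%s\n", type_name(1.0));	/* prints "double" */
	/* type_name("abc") would fail to compile: no matching case */
	return 0;
}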

This will also not affect the transformed APIs' readability: the
previously added kernel-doc above all of the seqlock.h functions makes
the expectations quite clear for call-site developers.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 94 +++++++++++++++++++---------------------
1 file changed, 45 insertions(+), 49 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0b4a22f..f3b7827 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -184,6 +184,12 @@ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ return READ_ONCE(s->seqcount.sequence); \
+} \
+ \
static __always_inline bool \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
@@ -205,6 +211,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
return s;
}

+static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
@@ -250,6 +261,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
__seqprop_case((s), ww_mutex, prop))

#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_sequence(s) __seqprop(s, sequence)
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)

@@ -268,21 +280,15 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
- __read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq; \
+ \
+ while ((seq = __seqcount_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
@@ -291,14 +297,12 @@ repeat:
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
- raw_read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_t_begin(s);
- smp_rmb();
- return ret;
-}
+({ \
+ unsigned seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ seq; \
+})

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -307,13 +311,10 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
- read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_t_begin(s);
-}
+({ \
+ seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
@@ -327,15 +328,13 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
- raw_read_seqcount_t(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
@@ -355,16 +354,13 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
- raw_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
-{
- /*
- * If the counter is odd, let read_seqcount_retry() fail
- * by decrementing the counter.
- */
- return raw_read_seqcount_t(s) & ~1;
-}
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier

2020-09-15 00:24:24

by Qian Cai

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Fri, 2020-09-04 at 17:32 +0200, Ahmed S. Darwish wrote:
> [ ... ]

Reverting this patchset [1] from today's linux-next fixed a splat below. The
splat looks like a false positive anyway because the existing locking dependency
chains from the task #1 here:

&s->seqcount#2 ---> pidmap_lock

[ 528.078061][ T7867] -> #1 (pidmap_lock){....}-{2:2}:
[ 528.078078][ T7867] lock_acquire+0x10c/0x560
[ 528.078089][ T7867] _raw_spin_lock_irqsave+0x64/0xb0
[ 528.078108][ T7867] free_pid+0x5c/0x160
free_pid at kernel/pid.c:131
[ 528.078127][ T7867] release_task.part.40+0x59c/0x7f0
__unhash_process at kernel/exit.c:76
(inlined by) __exit_signal at kernel/exit.c:147
(inlined by) release_task at kernel/exit.c:198
[ 528.078145][ T7867] do_exit+0x77c/0xda0
exit_notify at kernel/exit.c:679
(inlined by) do_exit at kernel/exit.c:826
[ 528.078163][ T7867] kthread+0x148/0x1d0
[ 528.078182][ T7867] ret_from_kernel_thread+0x5c/0x80

It is write_seqlock(&sig->stats_lock) in __exit_signal(), but the &s->seqcount#2
in read_mems_allowed_begin() is read_seqcount_begin(&current->mems_allowed_seq),
so there should be no deadlock?

[1] git revert --no-edit 0c9794c8b678..1909760f5fc3

[ 528.077900][ T7867] WARNING: possible circular locking dependency detected
[ 528.077912][ T7867] 5.9.0-rc5-next-20200914 #1 Not tainted
[ 528.077921][ T7867] ------------------------------------------------------
[ 528.077931][ T7867] runc:[1:CHILD]/7867 is trying to acquire lock:
[ 528.077942][ T7867] c000001fce5570c8 (&s->seqcount#2){....}-{0:0}, at: __slab_alloc+0x34/0xf0
[ 528.077972][ T7867]
[ 528.077972][ T7867] but task is already holding lock:
[ 528.077983][ T7867] c0000000056b0198 (pidmap_lock){....}-{2:2}, at: alloc_pid+0x258/0x590
[ 528.078009][ T7867]
[ 528.078009][ T7867] which lock already depends on the new lock.
[ 528.078009][ T7867]
[ 528.078031][ T7867]
[ 528.078031][ T7867] the existing dependency chain (in reverse order) is:
[ 528.078061][ T7867]
[ 528.078061][ T7867] -> #1 (pidmap_lock){....}-{2:2}:
[ 528.078078][ T7867] lock_acquire+0x10c/0x560
[ 528.078089][ T7867] _raw_spin_lock_irqsave+0x64/0xb0
[ 528.078108][ T7867] free_pid+0x5c/0x160
free_pid at kernel/pid.c:131
[ 528.078127][ T7867] release_task.part.40+0x59c/0x7f0
__unhash_process at kernel/exit.c:76
(inlined by) __exit_signal at kernel/exit.c:147
(inlined by) release_task at kernel/exit.c:198
[ 528.078145][ T7867] do_exit+0x77c/0xda0
exit_notify at kernel/exit.c:679
(inlined by) do_exit at kernel/exit.c:826
[ 528.078163][ T7867] kthread+0x148/0x1d0
[ 528.078182][ T7867] ret_from_kernel_thread+0x5c/0x80
[ 528.078208][ T7867]
[ 528.078208][ T7867] -> #0 (&s->seqcount#2){....}-{0:0}:
[ 528.078241][ T7867] check_prevs_add+0x1c4/0x1120
check_prev_add at kernel/locking/lockdep.c:2820
(inlined by) check_prevs_add at kernel/locking/lockdep.c:2944
[ 528.078260][ T7867] __lock_acquire+0x176c/0x1c00
validate_chain at kernel/locking/lockdep.c:3562
(inlined by) __lock_acquire at kernel/locking/lockdep.c:4796
[ 528.078278][ T7867] lock_acquire+0x10c/0x560
[ 528.078297][ T7867] ___slab_alloc+0xa40/0xb40
seqcount_lockdep_reader_access at include/linux/seqlock.h:103
(inlined by) read_mems_allowed_begin at include/linux/cpuset.h:135
(inlined by) get_any_partial at mm/slub.c:2035
(inlined by) get_partial at mm/slub.c:2078
(inlined by) new_slab_objects at mm/slub.c:2577
(inlined by) ___slab_alloc at mm/slub.c:2745
[ 528.078324][ T7867] __slab_alloc+0x34/0xf0
[ 528.078342][ T7867] kmem_cache_alloc+0x2d4/0x470
[ 528.078362][ T7867] create_object+0x74/0x430
[ 528.078381][ T7867] slab_post_alloc_hook+0xa4/0x670
[ 528.078399][ T7867] kmem_cache_alloc+0x1b4/0x470
[ 528.078418][ T7867] radix_tree_node_alloc.constprop.19+0xe4/0x160
[ 528.078438][ T7867] idr_get_free+0x298/0x360
[ 528.078456][ T7867] idr_alloc_u32+0x84/0x130
[ 528.078474][ T7867] idr_alloc_cyclic+0x7c/0x150
[ 528.078493][ T7867] alloc_pid+0x27c/0x590
[ 528.078511][ T7867] copy_process+0xc90/0x1930
copy_process at kernel/fork.c:2104
[ 528.078529][ T7867] kernel_clone+0x120/0xa10
[ 528.078546][ T7867] __do_sys_clone+0x88/0xd0
[ 528.078565][ T7867] system_call_exception+0xf8/0x1d0
[ 528.078592][ T7867] system_call_common+0xe8/0x218
[ 528.078609][ T7867]
[ 528.078609][ T7867] other info that might help us debug this:
[ 528.078609][ T7867]
[ 528.078650][ T7867] Possible unsafe locking scenario:
[ 528.078650][ T7867]
[ 528.078670][ T7867] CPU0 CPU1
[ 528.078695][ T7867] ---- ----
[ 528.078713][ T7867] lock(pidmap_lock);
[ 528.078730][ T7867] lock(&s->seqcount#2);
[ 528.078751][ T7867] lock(pidmap_lock);
[ 528.078770][ T7867] lock(&s->seqcount#2);
[ 528.078788][ T7867]
[ 528.078788][ T7867] *** DEADLOCK ***
[ 528.078788][ T7867]
[ 528.078800][ T7867] 2 locks held by runc:[1:CHILD]/7867:
[ 528.078808][ T7867] #0: c000001ffea6f4f0 (lock#2){+.+.}-{2:2}, at: __radix_tree_preload+0x8/0x370
__radix_tree_preload at lib/radix-tree.c:322
[ 528.078844][ T7867] #1: c0000000056b0198 (pidmap_lock){....}-{2:2}, at: alloc_pid+0x258/0x590
[ 528.078870][ T7867]
[ 528.078870][ T7867] stack backtrace:
[ 528.078890][ T7867] CPU: 46 PID: 7867 Comm: runc:[1:CHILD] Not tainted 5.9.0-rc5-next-20200914 #1
[ 528.078921][ T7867] Call Trace:
[ 528.078940][ T7867] [c000001ff07eefc0] [c00000000063f8c8] dump_stack+0xec/0x144 (unreliable)
[ 528.078964][ T7867] [c000001ff07ef000] [c00000000013f44c] print_circular_bug.isra.43+0x2dc/0x350
[ 528.078978][ T7867] [c000001ff07ef0a0] [c00000000013f640] check_noncircular+0x180/0x1b0
[ 528.079000][ T7867] [c000001ff07ef170] [c000000000140b84] check_prevs_add+0x1c4/0x1120
[ 528.079022][ T7867] [c000001ff07ef280] [c0000000001446ec] __lock_acquire+0x176c/0x1c00
[ 528.079043][ T7867] [c000001ff07ef3a0] [c00000000014578c] lock_acquire+0x10c/0x560
[ 528.079066][ T7867] [c000001ff07ef490] [c0000000003565f0] ___slab_alloc+0xa40/0xb40
[ 528.079079][ T7867] [c000001ff07ef590] [c000000000356724] __slab_alloc+0x34/0xf0
[ 528.079100][ T7867] [c000001ff07ef5e0] [c000000000356ab4] kmem_cache_alloc+0x2d4/0x470
[ 528.079122][ T7867] [c000001ff07ef670] [c000000000397e14] create_object+0x74/0x430
[ 528.079144][ T7867] [c000001ff07ef720] [c000000000351944] slab_post_alloc_hook+0xa4/0x670
[ 528.079165][ T7867] [c000001ff07ef7e0] [c000000000356994] kmem_cache_alloc+0x1b4/0x470
[ 528.079187][ T7867] [c000001ff07ef870] [c00000000064e004] radix_tree_node_alloc.constprop.19+0xe4/0x160
radix_tree_node_alloc at lib/radix-tree.c:252
[ 528.079219][ T7867] [c000001ff07ef8e0] [c00000000064f2b8] idr_get_free+0x298/0x360
idr_get_free at lib/radix-tree.c:1507
[ 528.079249][ T7867] [c000001ff07ef970] [c000000000645db4] idr_alloc_u32+0x84/0x130
idr_alloc_u32 at lib/idr.c:46 (discriminator 4)
[ 528.079271][ T7867] [c000001ff07ef9e0] [c000000000645f8c] idr_alloc_cyclic+0x7c/0x150
idr_alloc_cyclic at lib/idr.c:126 (discriminator 1)
[ 528.079301][ T7867] [c000001ff07efa40] [c0000000000e48ac] alloc_pid+0x27c/0x590
[ 528.079342][ T7867] [c000001ff07efb20] [c0000000000acc60] copy_process+0xc90/0x1930
[ 528.079404][ T7867] [c000001ff07efc40] [c0000000000adc00] kernel_clone+0x120/0xa10
[ 528.079499][ T7867] [c000001ff07efd00] [c0000000000ae578] __do_sys_clone+0x88/0xd0
[ 528.079579][ T7867] [c000001ff07efdc0] [c000000000029c48] system_call_exception+0xf8/0x1d0
[ 528.079691][ T7867] [c000001ff07efe20] [c00000000000d0a8] system_call_common+0xe8/0x218


2020-09-15 12:59:39

by Boqun Feng

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Mon, Sep 14, 2020 at 08:20:53PM -0400, Qian Cai wrote:
> On Fri, 2020-09-04 at 17:32 +0200, Ahmed S. Darwish wrote:
> > [ ... ]
>
> Reverting this patchset [1] from today's linux-next fixed a splat below. The
> splat looks like a false positive anyway because the existing locking dependency
> chains from the task #1 here:
>
> &s->seqcount#2 ---> pidmap_lock
>
> [ 528.078061][ T7867] -> #1 (pidmap_lock){....}-{2:2}:
> [ 528.078078][ T7867] lock_acquire+0x10c/0x560
> [ 528.078089][ T7867] _raw_spin_lock_irqsave+0x64/0xb0
> [ 528.078108][ T7867] free_pid+0x5c/0x160
> free_pid at kernel/pid.c:131
> [ 528.078127][ T7867] release_task.part.40+0x59c/0x7f0
> __unhash_process at kernel/exit.c:76
> (inlined by) __exit_signal at kernel/exit.c:147
> (inlined by) release_task at kernel/exit.c:198
> [ 528.078145][ T7867] do_exit+0x77c/0xda0
> exit_notify at kernel/exit.c:679
> (inlined by) do_exit at kernel/exit.c:826
> [ 528.078163][ T7867] kthread+0x148/0x1d0
> [ 528.078182][ T7867] ret_from_kernel_thread+0x5c/0x80
>
> It is write_seqlock(&sig->stats_lock) in __exit_signal(), but the &s->seqcount#2
> in read_mems_allowed_begin() is read_seqcount_begin(&current->mems_allowed_seq),
> so there should be no deadlock?
>

I think this happened because seqcount_##lockname##_init() is defined
as a function rather than a macro, so when seqcount_init() gets
expanded in that function, the lock_class_key of the seqcount becomes
a static variable of the seqcount_##lockname##_init() function. As a
result, all seqcount_##lockname##_t instances in the same compile unit
(in this case kernel/fork.c) share the same lock class key, and
lockdep thinks they are the same lock ;-)

Regards,
Boqun
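
Boqun's observation above can be reproduced with a standalone C sketch
(illustration only, not kernel code): a static variable inside a
function is a single object shared by every caller, while a static
declared in a macro body is a distinct object at each expansion site,
which is what a per-instance lockdep key needs:

#include <stdio.h>

static void init_from_function(void)
{
	static int key;			/* one object for all callers */
	printf("fn  key: %p\n", (void *)&key);
}

#define INIT_FROM_MACRO()					\
	do {							\
		static int key;	/* one object per expansion */	\
		printf("mac key: %p\n", (void *)&key);		\
	} while (0)

int main(void)
{
	init_from_function();	/* prints the same address ... */
	init_from_function();	/* ... twice                   */
	INIT_FROM_MACRO();	/* prints two different        */
	INIT_FROM_MACRO();	/* addresses                   */
	return 0;
}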


2020-09-15 13:14:46

by Boqun Feng

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> On Mon, Sep 14, 2020 at 08:20:53PM -0400, Qian Cai wrote:
> > On Fri, 2020-09-04 at 17:32 +0200, Ahmed S. Darwish wrote:
> > > [ ... ]
> >
> > Reverting this patchset [1] from today's linux-next fixed a splat below. The
> > splat looks like a false positive anyway because the existing locking dependency
> > chains from the task #1 here:
> >
> > &s->seqcount#2 ---> pidmap_lock
> >
> > [ ... ]
> >
> > It is write_seqlock(&sig->stats_lock) in __exit_signal(), but the &s->seqcount#2
> > in read_mems_allowed_begin() is read_seqcount_begin(&current->mems_allowed_seq),
> > so there should be no deadlock?
> >
>
> I think this happened because seqcount_##lockname##_init() is defined
> as a function rather than a macro, so when seqcount_init() gets
> expanded in that function, the lock_class_key of the seqcount becomes
> a static variable of the seqcount_##lockname##_init() function. As a
> result, all seqcount_##lockname##_t instances in the same compile unit
> (in this case kernel/fork.c) share the same lock class key, and
> lockdep thinks they are the same lock ;-)
>

I don't know how to fix this properly, but below is an ugly attempt.
It is build-tested only; just food for thought.

Regards,
Boqun

--------------->8
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f73c7eb68f27..938a5053def3 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -84,14 +84,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
.dep_map = { .name = #lockname }

+# define MSIOCU 8 /* MAX SEQCOUNT IN ONE COMPILE UNIT */
/**
* seqcount_init() - runtime initializer for seqcount_t
* @s: Pointer to the seqcount_t instance
*/
# define seqcount_init(s) \
do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+ static struct lock_class_key __key[MSIOCU]; \
+ static int idx = 0; \
+ \
+ BUG_ON(idx >= MSIOCU); \
+ __seqcount_init((s), #s, &__key[idx++]); \
} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)

2020-09-15 23:59:37

by Peter Zijlstra

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> I think this happened because seqcount_##lockname##_init() is defined as
> a function rather than a macro, so when seqcount_init() gets expanded in

Bah! I hate all this :/

I suspect the below, while more verbose than I'd like, is the best
option.

---
include/linux/seqlock.h | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f73c7eb68f27..76e44e6c0100 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -173,6 +173,19 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lock: Pointer to the associated lock
*/

+#define seqcount_LOCKNAME_init(s, _lock, lockname) \
+ do { \
+ seqcount_##lockname##_t *____s = (s); \
+ seqcount_init(&____s->seqcount); \
+ __SEQ_LOCK(____s->lock = (_lock)); \
+ } while (0)
+
+#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
+#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
+#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
+
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
* seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
@@ -190,13 +203,6 @@ typedef struct seqcount_##lockname { \
__SEQ_LOCK(locktype *lock); \
} seqcount_##lockname##_t; \
\
-static __always_inline void \
-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
-{ \
- seqcount_init(&s->seqcount); \
- __SEQ_LOCK(s->lock = lock); \
-} \
- \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
@@ -284,8 +290,8 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__SEQ_LOCK(.lock = (assoc_lock)) \
}

-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
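
For context, a hedged sketch of a call site using the macro-based
initializer from the patch above (foo_lock/foo_seq are illustrative
names, not from this thread):

static spinlock_t foo_lock;
static seqcount_spinlock_t foo_seq;

static void foo_init(void)
{
	spin_lock_init(&foo_lock);
	/*
	 * seqcount_spinlock_init() is now a macro, so seqcount_init()
	 * expands right here and this init site gets its own static
	 * lock_class_key for lockdep.
	 */
	seqcount_spinlock_init(&foo_seq, &foo_lock);
}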

2020-09-16 14:01:03

by Peter Zijlstra

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Wed, Sep 16, 2020 at 09:00:59AM -0400, Qian Cai wrote:
>
>
> ----- Original Message -----
> > On Wed, Sep 16, 2020 at 08:52:07AM -0400, Qian Cai wrote:
> > > On Tue, 2020-09-15 at 16:30 +0200, [email protected] wrote:
> > > > On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> > > > > I think this happened because seqcount_##lockname##_init() is defined
> > > > > as a function rather than a macro, so when seqcount_init() gets
> > > > > expanded in
> > > >
> > > > Bah! I hate all this :/
> > > >
> > > > I suspect the below, while more verbose than I'd like is the best
> > > > option.
> > >
> > > Stephen, can you add this patch for now until Peter beats you to it?
> >
> > Did you verify it works? I only wrote it..
>
> Yes, I did.

Excellent, I'll stick a Tested-by from you on then.

2020-09-16 18:32:46

by Peter Zijlstra

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Wed, Sep 16, 2020 at 08:52:07AM -0400, Qian Cai wrote:
> On Tue, 2020-09-15 at 16:30 +0200, [email protected] wrote:
> > On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> > > I think this happened because seqcount_##lockname##_init() is defined as
> > > a function rather than a macro, so when seqcount_init() gets expanded in
> >
> > Bah! I hate all this :/
> >
> > I suspect the below, while more verbose than I'd like is the best
> > option.
>
> Stephen, can you add this patch for now until Peter beats you to it?

Did you verify it works? I only wrote it..

2020-09-16 20:33:13

by Qian Cai

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

On Tue, 2020-09-15 at 16:30 +0200, [email protected] wrote:
> On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> > I think this happened because seqcount_##lockname##_init() is defined as
> > a function rather than a macro, so when seqcount_init() gets expanded in
>
> Bah! I hate all this :/
>
> I suspect the below, while more verbose than I'd like is the best
> option.

Stephen, can you add this patch for now until Peter beats you to it?

> [ ... ]

2020-09-16 21:08:48

by Qian Cai

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support



----- Original Message -----
> On Wed, Sep 16, 2020 at 08:52:07AM -0400, Qian Cai wrote:
> > On Tue, 2020-09-15 at 16:30 +0200, [email protected] wrote:
> > > On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> > > > I think this happened because seqcount_##lockname##_init() is defined
> > > > as a function rather than a macro, so when seqcount_init() gets
> > > > expanded in
> > >
> > > Bah! I hate all this :/
> > >
> > > I suspect the below, while more verbose than I'd like is the best
> > > option.
> >
> > Stephen, can you add this patch for now until Peter beats you to it?
>
> Did you verify it works? I only wrote it..

Yes, I did.

2020-09-17 02:33:30

by Stephen Rothwell

Subject: Re: [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support

Hi all,

On Wed, 16 Sep 2020 15:02:33 +0200 [email protected] wrote:
>
> On Wed, Sep 16, 2020 at 09:00:59AM -0400, Qian Cai wrote:
> >
> >
> > ----- Original Message -----
> > > On Wed, Sep 16, 2020 at 08:52:07AM -0400, Qian Cai wrote:
> > > > On Tue, 2020-09-15 at 16:30 +0200, [email protected] wrote:
> > > > > On Tue, Sep 15, 2020 at 08:48:17PM +0800, Boqun Feng wrote:
> > > > > > I think this happened because seqcount_##lockname##_init() is defined
> > > > > > as a function rather than a macro, so when seqcount_init() gets
> > > > > > expanded in
> > > > >
> > > > > Bah! I hate all this :/
> > > > >
> > > > > I suspect the below, while more verbose than I'd like is the best
> > > > > option.
> > > >
> > > > Stephen, can you add this patch for now until Peter beats you to it?
> > >
> > > Did you verify it works? I only wrote it..
> >
> > Yes, I did.
>
> Excellent, I'll stick a Tested-by from you on then.

I'll add this to the tip tree merge today (unless the tip tree is
updated in the meantime).

--
Cheers,
Stephen Rothwell


Subject: [tip: locking/core] seqlock: Unbreak lockdep

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 267580db047ef428a70bef8287ca62c5a450c139
Gitweb: https://git.kernel.org/tip/267580db047ef428a70bef8287ca62c5a450c139
Author: [email protected] <[email protected]>
AuthorDate: Tue, 15 Sep 2020 16:30:28 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Wed, 16 Sep 2020 16:26:58 +02:00

seqlock: Unbreak lockdep

seqcount_LOCKNAME_init() needs to be a macro due to the lockdep
annotation in seqcount_init(). Since a macro cannot define another
macro, we need to effectively revert commit: e4e9ab3f9f91 ("seqlock:
Fold seqcount_LOCKNAME_init() definition").

Fixes: e4e9ab3f9f91 ("seqlock: Fold seqcount_LOCKNAME_init() definition")
Reported-by: Qian Cai <[email protected]>
Debugged-by: Boqun Feng <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Tested-by: Qian Cai <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f73c7eb..76e44e6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -173,6 +173,19 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lock: Pointer to the associated lock
*/

+#define seqcount_LOCKNAME_init(s, _lock, lockname) \
+ do { \
+ seqcount_##lockname##_t *____s = (s); \
+ seqcount_init(&____s->seqcount); \
+ __SEQ_LOCK(____s->lock = (_lock)); \
+ } while (0)
+
+#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
+#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
+#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
+
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
* seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
@@ -190,13 +203,6 @@ typedef struct seqcount_##lockname { \
__SEQ_LOCK(locktype *lock); \
} seqcount_##lockname##_t; \
\
-static __always_inline void \
-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
-{ \
- seqcount_init(&s->seqcount); \
- __SEQ_LOCK(s->lock = lock); \
-} \
- \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
@@ -284,8 +290,8 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__SEQ_LOCK(.lock = (assoc_lock)) \
}

-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)