2023-10-12 14:34:15

by Oleg Nesterov

Subject: [PATCH v2 1/2] seqlock: simplify SEQCOUNT_LOCKNAME()

1. Kill the "lockmember" argument. It is always s->lock; moreover,
__seqprop_##lockname##_sequence() already uses s->lock and
ignores "lockmember".

2. Kill the "lock_acquire" argument. __seqprop_##lockname##_sequence()
can use the same "lockbase" prefix for _lock and _unlock.

Apart from line numbers, gcc -E outputs the same code.
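
As an illustration of the token pasting this relies on, here is a tiny
standalone toy (the mock_spin_* functions and CYCLE_LOCK() are invented
names, not kernel code): a single "lockbase" prefix is enough to form
both the _lock() and _unlock() calls, so no separate full-call argument
is needed.

    #include <stdio.h>

    /* Stand-ins for any lock API following the <base>_lock/<base>_unlock
     * naming convention that the macro depends on. */
    static void mock_spin_lock(int *lock)   { (void)lock; printf("lock\n"); }
    static void mock_spin_unlock(int *lock) { (void)lock; printf("unlock\n"); }

    /* One "lockbase" prefix forms both calls via '##'. */
    #define CYCLE_LOCK(lockbase, lockptr)       \
        do {                                    \
            lockbase##_lock(lockptr);           \
            lockbase##_unlock(lockptr);         \
        } while (0)

    int main(void)
    {
        int lock = 0;

        /* Expands to: mock_spin_lock(&lock); mock_spin_unlock(&lock); */
        CYCLE_LOCK(mock_spin, &lock);
        return 0;
    }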

Signed-off-by: Oleg Nesterov <[email protected]>
---
include/linux/seqlock.h | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e9bd2f65d7f4..b9a30c62ffe4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -191,11 +191,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- * @lockbase: associated lock release function (prefix only)
- * @lock_acquire: associated lock acquisition function (full call)
+ * @lockbase: prefix for associated lock/unlock
*/
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -216,7 +214,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
- __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
@@ -242,7 +240,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}

/*
@@ -271,10 +269,10 @@ static inline void __seqprop_assert(const seqcount_t *s)

#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)

-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)

/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
--
2.25.1.362.g51ebf55



2023-10-12 14:34:37

by Oleg Nesterov

Subject: [PATCH v2 2/2] seqlock: change __seqprop() to return the function pointer

This simplifies the macro and makes it easy to add new seqprops
with 2 or more args.

Plus, this way we do not lose the type info; the (void *) type cast is
no longer needed.

And the latter reveals a problem: a lot of seqcount_t helpers pass
a "const seqcount_t *s" argument to __seqprop_ptr(seqcount_t *s),
but (before this patch) "(void *)(s)" masked the mismatch.

So this patch changes __seqprop_ptr() and __seqprop_##lockname##_ptr()
to accept a "const LOCKNAME *s" argument. This is not nice either:
they need to drop the constness on return, because these helpers are used
by both readers and writers, but at least it is clear what's going on.
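
As a standalone illustration of the shape this patch switches to (all
names below are invented; this is not kernel code): _Generic now selects
the bare helper name, which decays to a function pointer, and the single
"(s)" call happens outside the selector, so the unselected arms no longer
need a "(void *)" call of their own.

    #include <stdio.h>

    struct plain  { unsigned int seq; };
    struct locked { unsigned int seq; int lock; };

    static unsigned int plain_sequence(const struct plain *s)   { return s->seq; }
    static unsigned int locked_sequence(const struct locked *s) { return s->seq; }

    /* Pick the helper by the static type of *(s), then call it once with s. */
    #define get_sequence(s)                             \
        _Generic(*(s),                                  \
                 struct plain:  plain_sequence,         \
                 struct locked: locked_sequence)(s)

    int main(void)
    {
        struct plain  p = { .seq = 1 };
        struct locked l = { .seq = 2, .lock = 0 };

        printf("%u %u\n", get_sequence(&p), get_sequence(&l));
        return 0;
    }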

Signed-off-by: Oleg Nesterov <[email protected]>
---
include/linux/seqlock.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index b9a30c62ffe4..bf1435ffe24f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -200,9 +200,9 @@ typedef struct seqcount_##lockname { \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
-__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
{ \
- return &s->seqcount; \
+ return (void *)&s->seqcount; /* drop const */ \
} \
\
static __always_inline unsigned \
@@ -247,9 +247,9 @@ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
{
- return s;
+ return (void *)s; /* drop const */
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
@@ -292,19 +292,19 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop

#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))

-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)

/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
--
2.25.1.362.g51ebf55


2023-10-12 18:21:39

by Ingo Molnar

Subject: Re: [PATCH v2 2/2] seqlock: change __seqprop() to return the function pointer


* Oleg Nesterov <[email protected]> wrote:

> This simplifies the macro and makes it easy to add new seqprops
> with 2 or more args.
>
> Plus, this way we do not lose the type info; the (void *) type cast is
> no longer needed.
>
> And the latter reveals a problem: a lot of seqcount_t helpers pass
> a "const seqcount_t *s" argument to __seqprop_ptr(seqcount_t *s),
> but (before this patch) "(void *)(s)" masked the mismatch.
>
> So this patch changes __seqprop_ptr() and __seqprop_##lockname##_ptr()
> to accept a "const LOCKNAME *s" argument. This is not nice either:
> they need to drop the constness on return, because these helpers are used
> by both readers and writers, but at least it is clear what's going on.

> +__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
> { \
> + return (void *)&s->seqcount; /* drop const */ \
> } \

> +static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
> {
> + return (void *)s; /* drop const */
> }

Okay, so dropping 'const' makes sense in terms of staying bug-compatible
with the previous API and not build-breaking the world - but could we
perhaps follow this up with fixups of the type misuse and then a removal
of the forced type casts from these APIs?

Meanwhile I'm applying your patches to tip:locking/core, unless someone objects.

Thanks,

Ingo

Subject: [tip: locking/core] locking/seqlock: Change __seqprop() to return the function pointer

The following commit has been merged into the locking/core branch of tip:

Commit-ID: e6115c6f7a0ce3388cc60b69a284facf78b5dbfd
Gitweb: https://git.kernel.org/tip/e6115c6f7a0ce3388cc60b69a284facf78b5dbfd
Author: Oleg Nesterov <[email protected]>
AuthorDate: Thu, 12 Oct 2023 16:32:27 +02:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Thu, 12 Oct 2023 20:18:21 +02:00

locking/seqlock: Change __seqprop() to return the function pointer

This simplifies the macro and makes it easy to add new seqprops
with 2 or more args.

Plus, this way we do not lose the type info; the (void *) type cast is
no longer needed.

And the latter reveals a problem: a lot of seqcount_t helpers pass
a "const seqcount_t *s" argument to __seqprop_ptr(seqcount_t *s),
but (before this patch) "(void *)(s)" masked the mismatch.

So this patch changes __seqprop_ptr() and __seqprop_##lockname##_ptr()
to accept a "const LOCKNAME *s" argument. This is not nice either:
they need to drop the constness on return, because these helpers are used
by both readers and writers, but at least it is clear what's going on.

Signed-off-by: Oleg Nesterov <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7e7109d..4b8dcd3 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -200,9 +200,9 @@ typedef struct seqcount_##lockname { \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
-__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
{ \
- return &s->seqcount; \
+ return (void *)&s->seqcount; /* drop const */ \
} \
\
static __always_inline unsigned \
@@ -247,9 +247,9 @@ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
{
- return s;
+ return (void *)s; /* drop const */
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
@@ -292,19 +292,19 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop

#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))

-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)

/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier

Subject: [tip: locking/core] locking/seqlock: Simplify SEQCOUNT_LOCKNAME()

The following commit has been merged into the locking/core branch of tip:

Commit-ID: f995443f01b4dbcce723539b99050ce69b319e58
Gitweb: https://git.kernel.org/tip/f995443f01b4dbcce723539b99050ce69b319e58
Author: Oleg Nesterov <[email protected]>
AuthorDate: Thu, 12 Oct 2023 16:31:58 +02:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Thu, 12 Oct 2023 20:18:20 +02:00

locking/seqlock: Simplify SEQCOUNT_LOCKNAME()

1. Kill the "lockmember" argument. It is always s->lock; moreover,
__seqprop_##lockname##_sequence() already uses s->lock and
ignores "lockmember".

2. Kill the "lock_acquire" argument. __seqprop_##lockname##_sequence()
can use the same "lockbase" prefix for _lock and _unlock.

Apart from line numbers, gcc -E outputs the same code.

Signed-off-by: Oleg Nesterov <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
include/linux/seqlock.h | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index af518e4..7e7109d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -191,11 +191,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- * @lockbase: associated lock release function (prefix only)
- * @lock_acquire: associated lock acquisition function (full call)
+ * @lockbase: prefix for associated lock/unlock
*/
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -216,7 +214,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
- __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
@@ -242,7 +240,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}

/*
@@ -271,10 +269,10 @@ static inline void __seqprop_assert(const seqcount_t *s)

#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)

-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)

/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t

2023-10-12 19:25:11

by Linus Torvalds

Subject: Re: [PATCH v2 2/2] seqlock: change __seqprop() to return the function pointer

On Thu, 12 Oct 2023 at 11:21, Ingo Molnar <[email protected]> wrote:
>
> Okay, so dropping 'const' makes sense in terms of staying bug-compatible
> with the previous API and not build-breaking the world - but could we
> perhaps follow this up with fixups of the type misuse and then a removal
> of the forced type casts from these APIs?

No. The use of 'const' here is *not* a bug.

The thing is, 'const' doesn't mean what you seem to think it means. A
'const' pointer in C in no way means that the target is constant - it
means that *THIS* use of the pointer will not write to the target!

Those may sound similar, but they are very very very different.

In particular, for the sequence

seq = raw_seqcount_begin(seq_ptr);
...
if (read_seqcount_retry(seq_ptr, seq))
goto retry;

then 'seq_ptr' really *is* a 'const' pointer. The reader very much
does not write to it, and this very much is part of the fundamental
design.

The above is *literally* what sequence locking is all about: readers
are pure readers.
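
A minimal sketch of such a pure-reader path (struct foo and
foo_read_val() are invented for illustration; only the
read_seqcount_begin()/read_seqcount_retry() calls are real API):

    #include <linux/seqlock.h>

    struct foo {
        seqcount_t  seq;
        int         val;
    };

    /* The reader never writes through @f, so @f can be const. */
    static int foo_read_val(const struct foo *f)
    {
        unsigned int start;
        int v;

        do {
            start = read_seqcount_begin(&f->seq);
            v = f->val;
        } while (read_seqcount_retry(&f->seq, start));

        return v;
    }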

So no, making it a 'const seqcount_t *' is absolutely not a bug. It's very
much a FUNDAMENTAL FEATURE of sequence locks.

Now, I'm not sure how much we actually take advantage of this and
there may not be very many cases of this at all, but I really think this
is fundamental to the whole data structure, and there are most
definitely cases where we probably *should* take more advantage of the
fact that a read_seqcount is a read-only op.

For example, I think a function like 'dget_parent()' should actually
take a 'const struct dentry *dentry' as its argument, and the seqcount
is embedded inside that dentry and as such would also be const.

Right now the dentry code doesn't actually do that, because this isn't
one of the areas we have constified, but it's conceptually the right
thing to do. We use the dentry argument in a read-only manner
(although the *parent* that we look up then is written to, and in the
case of a root dentry, the parent may end up being the same as the
original).

Note that the 'const' should only be an issue for the begin/retry
cases, and obviously not for the write ones, but those readers do use
the seqprop_ptr() helper. So those absolutely need to handle the const
case.

So no, the cast wasn't "masking" anything at all. The 'const' is real.

Linus

2023-10-13 08:47:08

by Ingo Molnar

Subject: [PATCH] locking/seqlock: Propagate 'const' pointers within read-only methods, remove forced type casts


* Linus Torvalds <[email protected]> wrote:

> On Thu, 12 Oct 2023 at 11:21, Ingo Molnar <[email protected]> wrote:
> >
> > Okay, so dropping 'const' makes sense in terms of staying bug-compatible
> > with the previous API and not build-breaking the world - but could we
> > perhaps follow this up with fixups of the type misuse and then a removal
> > of the forced type casts from these APIs?
>
> No. The use of 'const' here is *not* a bug.
>
> The thing is, 'const' doesn't mean what you seem to think it means. A
> 'const' pointer in C in no way means that the target is constant - it
> means that *THIS* use of the pointer will not write to the target!

Yeah, that is absolutely what I too think 'const' means - and sorry, I
didn't expand: I meant something like the patch below, which clearly
separates the 'const' from the non-const pointer uses within
<linux/seqlock.h> and removes the two forced type casts I was unhappy
about.

The 'bug' was that the __seqprop_*ptr() wrapper was used with both const
and non-const pointers, and we forced a type and lost a tiny bit of
potential const propagation. The code was fine and I should not have called
it a 'bug', but I consider the dropping of 'const' a bad pattern, and I
sometimes exaggerate problems to trick^W convince developers to continue
working along a given path...
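
Concretely, the pattern looks like this in plain C (peek() and reader()
are invented names, just to show the shape): without the cast the
compiler flags the qualifier mismatch; with the explicit cast it is
silently discarded, together with any further const propagation.

    #include <stdio.h>

    static unsigned int peek(unsigned int *p)
    {
        return *p;
    }

    static unsigned int reader(const unsigned int *p)
    {
        /* "return peek(p);" would draw a discarded-qualifier warning. */
        return peek((void *)p);  /* silent: the cast hides the mismatch */
    }

    int main(void)
    {
        unsigned int x = 42;

        printf("%u\n", reader(&x));
        return 0;
    }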

In hindsight my "break the world" expectation was overblown too: our const
propagation through these methods was almost complete already, and the
fixes stayed within <linux/seqlock.h>.

This patch could probably be split into two patches. Lightly tested only.

Does this work for you?

Thanks,

Ingo

===================>
From: Ingo Molnar <[email protected]>
Date: Fri, 13 Oct 2023 10:15:46 +0200
Subject: [PATCH] locking/seqlock: Propagate 'const' pointers within read-only methods, remove forced type casts

Currently __seqprop_ptr() is an inline function that must choose between
'const' and non-const seqcount-related pointers - but this results in
the undesirable loss of 'const' propagation, via a forced type cast.

The easiest solution would be to turn the pointer wrappers into macros that
pass through whatever type is passed to them - but the clever maze of
seqlock API instantiation macros relies on the C preprocessor's '##'
token-pasting operator, which isn't recursive, so inline functions must
be used here.

So create two wrapper variants instead: 'ptr' and 'const_ptr', and pick the
right one for the codepaths that are const: read_seqcount_begin() and
read_seqcount_retry().
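
In toy form (struct thing and the thing_* helpers are invented, not part
of the patch), the two variants are simply a const and a non-const
accessor, so each caller keeps its own qualification and no cast is
needed anywhere:

    struct thing {
        unsigned int sequence;
    };

    /* Writers get the plain pointer... */
    static inline unsigned int *thing_ptr(struct thing *t)
    {
        return &t->sequence;
    }

    /* ...read-only paths get a const-preserving one. */
    static inline const unsigned int *thing_const_ptr(const struct thing *t)
    {
        return &t->sequence;
    }

    /* A reader stays const-qualified end to end... */
    static inline unsigned int thing_read(const struct thing *t)
    {
        return *thing_const_ptr(t);
    }

    /* ...while a writer uses the non-const accessor. */
    static inline void thing_bump(struct thing *t)
    {
        (*thing_ptr(t))++;
    }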

This cleans up type handling and allows the removal of all type forcing.

No change in functionality.

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Paul E. McKenney <[email protected]>
---
include/linux/seqlock.h | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 4b8dcd3a0d93..80f21d2ca2aa 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -200,9 +200,15 @@ typedef struct seqcount_##lockname { \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
-__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
- return (void *)&s->seqcount; /* drop const */ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
} \
\
static __always_inline unsigned \
@@ -247,9 +253,14 @@ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
- return (void *)s; /* drop const */
+ return s;
+}
+
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
+{
+ return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
@@ -302,6 +313,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
__seqprop_case((s), mutex, prop))

#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
#define seqprop_sequence(s) __seqprop(s, sequence)(s)
#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
#define seqprop_assert(s) __seqprop(s, assert)(s)
@@ -353,7 +365,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})

@@ -419,7 +431,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- do___read_seqcount_retry(seqprop_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -439,7 +451,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- do_read_seqcount_retry(seqprop_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{

2023-10-13 16:26:49

by Oleg Nesterov

Subject: Re: [PATCH] locking/seqlock: Propagate 'const' pointers within read-only methods, remove forced type casts

On 10/13, Ingo Molnar wrote:
>
> So create two wrapper variants instead: 'ptr' and 'const_ptr', and pick the
> right one for the codepaths that are const: read_seqcount_begin() and
> read_seqcount_retry().
>
> This cleans up type handling and allows the removal of all type forcing.

Too late, but nevertheless

Reviewed-by: Oleg Nesterov <[email protected]>