2014-07-11 21:00:17

by Davidlohr Bueso

Subject: [PATCH peterz-queue:locking/core 1/2] locking/rwsem: Fix building with opt spinning and new osq_lock header

Commit 'rwsem: Reduce the size of struct rw_semaphore' broke all
DECLARE_RWSEM users, i.e.:

init/init_task.c:14:44: error: ‘OQS_UNLOCKED_VAL’ undeclared here (not in a function)
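
For context, DECLARE_RWSEM() pulls in __RWSEM_OPT_INIT() at every static
declaration site, which is why the misspelled symbol shows up there.
Roughly (a paraphrase of the rwsem.h macros, not the exact expansion):

/* DECLARE_RWSEM(name) boils down to something like: */
struct rw_semaphore name = {
        .count     = RWSEM_UNLOCKED_VALUE,
        .wait_list = LIST_HEAD_INIT((name).wait_list),
        .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),
        .owner     = NULL,
        .osq       = { ATOMIC_INIT(OQS_UNLOCKED_VAL) }, /* typo: no such symbol */
};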

Signed-off-by: Davidlohr Bueso <[email protected]>
---
include/linux/rwsem.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 0eff99c..5d40884 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -64,7 +64,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_OPT_INIT(lockname) , .owner = NULL, .osq = { ATOMIC_INIT(OQS_UNLOCKED_VAL) }
+#define __RWSEM_OPT_INIT(lockname) , .owner = NULL, .osq = { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
--
1.8.1.4


2014-07-11 21:00:19

by Davidlohr Bueso

Subject: [PATCH peterz-queue:locking/core 2/2] locking/rwsem: Add CONFIG_RWSEM_SPIN_ON_OWNER

Just like with mutexes (CONFIG_MUTEX_SPIN_ON_OWNER),
encapsulate the dependencies for rwsem optimistic spinning.
No logical changes here as it continues to depend on both
SMP and the XADD algorithm variant.
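
To illustrate, a minimal before/after sketch of what the new symbol buys
at the call sites (both guards select the same builds, since
RWSEM_GENERIC_SPINLOCK and RWSEM_XCHGADD_ALGORITHM are mutually exclusive):

/* before: every site open-codes the dependency */
#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
/* ... optimistic spinning code ... */
#endif

/* after: the condition lives once in Kconfig (SMP && RWSEM_XCHGADD_ALGORITHM) */
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/* ... optimistic spinning code ... */
#endif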

Acked-by: Jason Low <[email protected]>
Signed-off-by: Davidlohr Bueso <[email protected]>
---
Based on previous discussion (https://lkml.org/lkml/2014/6/6/461):
rebased & included ifdefs around #include <linux/osq_lock.h>

include/linux/rwsem.h | 6 ++++--
kernel/Kconfig.locks | 4 ++++
kernel/locking/rwsem-xadd.c | 4 ++--
kernel/locking/rwsem.c | 2 +-
4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 5d40884..03dbec7 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,7 +14,9 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
+#endif

struct rw_semaphore;

@@ -26,7 +28,7 @@ struct rw_semaphore {
long count;
struct list_head wait_list;
raw_spinlock_t wait_lock;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
@@ -63,7 +65,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .owner = NULL, .osq = { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
#else
#define __RWSEM_OPT_INIT(lockname)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9..e4c3162 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -224,6 +224,10 @@ config MUTEX_SPIN_ON_OWNER
def_bool y
depends on SMP && !DEBUG_MUTEXES

+config RWSEM_SPIN_ON_OWNER
+ def_bool y
+ depends on SMP && RWSEM_XCHGADD_ALGORITHM
+
config ARCH_USE_QUEUE_RWLOCK
bool

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a673bba..b3d0e9f 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,7 +82,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
sem->count = RWSEM_UNLOCKED_VALUE;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
sem->owner = NULL;
atomic_set(&sem->osq.tail, OSQ_UNLOCKED_VAL);
#endif
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
return false;
}

-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806d..e2d3bc7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@

#include <linux/atomic.h>

-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
sem->owner = current;
--
1.8.1.4

2014-07-11 21:13:38

by Jason Low

Subject: Re: [PATCH peterz-queue:locking/core 1/2] locking/rwsem: Fix building with opt spinning and new osq_lock header

On Fri, 2014-07-11 at 14:00 -0700, Davidlohr Bueso wrote:
> Commit 'rwsem: Reduce the size of struct rw_semaphore' broke all
> DECLARE_RWSEM users, i.e.:
>
> init/init_task.c:14:44: error: ‘OQS_UNLOCKED_VAL’ undeclared here (not in a function)
>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> ---
> include/linux/rwsem.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
> index 0eff99c..5d40884 100644
> --- a/include/linux/rwsem.h
> +++ b/include/linux/rwsem.h
> @@ -64,7 +64,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
> #endif
>
> #if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
> -#define __RWSEM_OPT_INIT(lockname) , .owner = NULL, .osq = { ATOMIC_INIT(OQS_UNLOCKED_VAL) }
> +#define __RWSEM_OPT_INIT(lockname) , .owner = NULL, .osq = { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

Hi David, I'll be sending out a v2 patchset which addresses Steven's
feedback (which includes using a macro instead of directly initializing
the osq field).

Thanks!
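
The macro-based initialization referred to above is what the tip commit
below ends up using (.osq = OSQ_LOCK_UNLOCKED and osq_lock_init()); the
osq_lock.h helpers look roughly like this:

#define OSQ_UNLOCKED_VAL        (0)
#define OSQ_LOCK_UNLOCKED       { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
        atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}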

Subject: [tip:locking/urgent] locking/rwsem: Add CONFIG_RWSEM_SPIN_ON_OWNER

Commit-ID: 5db6c6fefb1ca0e81e3bd6dd8998bf51c453d823
Gitweb: http://git.kernel.org/tip/5db6c6fefb1ca0e81e3bd6dd8998bf51c453d823
Author: Davidlohr Bueso <[email protected]>
AuthorDate: Fri, 11 Jul 2014 14:00:06 -0700
Committer: Ingo Molnar <[email protected]>
CommitDate: Wed, 16 Jul 2014 14:57:13 +0200

locking/rwsem: Add CONFIG_RWSEM_SPIN_ON_OWNER

Just like with mutexes (CONFIG_MUTEX_SPIN_ON_OWNER),
encapsulate the dependencies for rwsem optimistic spinning.
No logical changes here as it continues to depend on both
SMP and the XADD algorithm variant.

Signed-off-by: Davidlohr Bueso <[email protected]>
Acked-by: Jason Low <[email protected]>
[ Also make it depend on ARCH_SUPPORTS_ATOMIC_RMW. ]
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Cc: Chris Mason <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Josef Bacik <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Waiman Long <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/rwsem.h | 6 ++++--
kernel/Kconfig.locks | 4 ++++
kernel/locking/rwsem-xadd.c | 4 ++--
kernel/locking/rwsem.c | 2 +-
4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 716807f..035d3c5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,7 +14,9 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
+#endif

struct rw_semaphore;

@@ -26,7 +28,7 @@ struct rw_semaphore {
long count;
struct list_head wait_list;
raw_spinlock_t wait_lock;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
@@ -63,7 +65,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
#else
#define __RWSEM_OPT_INIT(lockname)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 8190794..76768ee 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -227,6 +227,10 @@ config MUTEX_SPIN_ON_OWNER
def_bool y
depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW

+config RWSEM_SPIN_ON_OWNER
+ def_bool y
+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+
config ARCH_USE_QUEUE_RWLOCK
bool

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7190592..a2391ac 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,7 +82,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
sem->count = RWSEM_UNLOCKED_VALUE;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
sem->owner = NULL;
osq_lock_init(&sem->osq);
#endif
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
return false;
}

-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806d..e2d3bc7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@

#include <linux/atomic.h>

-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
sem->owner = current;