2014-06-15 13:16:37

by Peter Zijlstra

Subject: [PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors

When we detect a hypervisor (!paravirt, see later patches), revert to
a simple test-and-set lock to avoid the horrors of queue preemption.

Signed-off-by: Peter Zijlstra <[email protected]>
---
arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
include/asm-generic/qspinlock.h | 7 +++++++
kernel/locking/qspinlock.c | 3 +++
3 files changed, 24 insertions(+)

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

+#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
@@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str

#endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */

+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
}
#endif

+#ifndef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
/*
* Initializier
*/
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp

BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

+	if (virt_queue_spin_lock(lock))
+		return;
+
/*
* wait for in-progress pending->locked hand-overs
*
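
For readers following along outside the kernel tree: the fallback above is a
plain test-and-set (strictly, compare-and-swap) spinlock. Every virtual CPU
spins on the lock word itself instead of joining a queue, so a preempted vCPU
never stalls the waiters queued behind it. Below is a minimal user-space
sketch of the same idea using C11 atomics rather than the kernel's atomic_t
API; the names (tas_lock, TAS_LOCKED) are illustrative and not from the patch.

#include <stdatomic.h>
#include <sched.h>

#define TAS_LOCKED 1			/* stand-in for _Q_LOCKED_VAL */

struct tas_lock {
	atomic_int val;			/* 0 = unlocked, TAS_LOCKED = held */
};

static void tas_lock(struct tas_lock *lock)
{
	int expected;

	/* Retry the 0 -> TAS_LOCKED transition until it succeeds. */
	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(&lock->val,
							  &expected, TAS_LOCKED,
							  memory_order_acquire,
							  memory_order_relaxed))
			return;
		sched_yield();		/* user-space stand-in for cpu_relax() */
	}
}

static void tas_unlock(struct tas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

Because there is no queue, whichever vCPU happens to be running can grab the
lock next; that sacrifices fairness, which is the trade-off Waiman raises in
the reply below.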


2014-06-16 21:57:22

by Waiman Long

Subject: Re: [PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors

On 06/15/2014 08:47 AM, Peter Zijlstra wrote:
> When we detect a hypervisor (!paravirt, see later patches), revert to
> a simple test-and-set lock to avoid the horrors of queue preemption.
>
> Signed-off-by: Peter Zijlstra <[email protected]>
> ---
> arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
> include/asm-generic/qspinlock.h | 7 +++++++
> kernel/locking/qspinlock.c | 3 +++
> 3 files changed, 24 insertions(+)
>
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
> #ifndef _ASM_X86_QSPINLOCK_H
> #define _ASM_X86_QSPINLOCK_H
>
> +#include <asm/cpufeature.h>
> #include <asm-generic/qspinlock_types.h>
>
> #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> @@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
>
> #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
>
> +#define virt_queue_spin_lock virt_queue_spin_lock
> +
> +static inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
> +		cpu_relax();
> +
> +	return true;
> +}
> +
> #include <asm-generic/qspinlock.h>
>
> #endif /* _ASM_X86_QSPINLOCK_H */
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
> }
> #endif
>
> +#ifndef virt_queue_spin_lock
> +static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	return false;
> +}
> +#endif
> +
> /*
> * Initializier
> */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +	if (virt_queue_spin_lock(lock))
> +		return;
> +
> /*
> * wait for in-progress pending->locked hand-overs
> *

I just wonder if it would be better to let the kernel distributors
decide whether the unfair lock should be the default for virtual
guests. Anyway, I have no objection to it myself.

-Longman
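
(Purely as an illustration of the suggestion above: a distribution could gate
the fallback behind a build-time switch. The Kconfig symbol below is
hypothetical and not part of this series; everything else is taken from the
posted patch.)

static inline bool virt_queue_spin_lock(struct qspinlock *lock)
{
	/*
	 * CONFIG_PARAVIRT_UNFAIR_LOCKS is a hypothetical, distro-selectable
	 * option used here for illustration only.
	 */
	if (!IS_ENABLED(CONFIG_PARAVIRT_UNFAIR_LOCKS))
		return false;

	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();

	return true;
}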

2014-06-18 19:29:03

by Konrad Rzeszutek Wilk

Subject: Re: [PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors

On Sun, Jun 15, 2014 at 02:47:05PM +0200, Peter Zijlstra wrote:
> When we detect a hypervisor (!paravirt, see later patches), revert to

Please spell out the names of the patches.

> a simple test-and-set lock to avoid the horrors of queue preemption.

Heheh.
>
> Signed-off-by: Peter Zijlstra <[email protected]>
> ---
> arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
> include/asm-generic/qspinlock.h | 7 +++++++
> kernel/locking/qspinlock.c | 3 +++
> 3 files changed, 24 insertions(+)
>
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
> #ifndef _ASM_X86_QSPINLOCK_H
> #define _ASM_X86_QSPINLOCK_H
>
> +#include <asm/cpufeature.h>
> #include <asm-generic/qspinlock_types.h>
>
> #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> @@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
>
> #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
>
> +#define virt_queue_spin_lock virt_queue_spin_lock
> +
> +static inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
> +		cpu_relax();
> +
> +	return true;
> +}
> +
> #include <asm-generic/qspinlock.h>
>
> #endif /* _ASM_X86_QSPINLOCK_H */
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
> }
> #endif
>
> +#ifndef virt_queue_spin_lock
> +static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
> +{
> +	return false;
> +}
> +#endif
> +
> /*
> * Initializier
> */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +	if (virt_queue_spin_lock(lock))
> +		return;
> +
> /*
> * wait for in-progress pending->locked hand-overs
> *
>
>