2019-10-16 11:34:18

by Alex Kogan

Subject: [PATCH v5 4/5] locking/qspinlock: Introduce starvation avoidance into CNA

Keep track of the number of intra-node lock handoffs, and force
inter-node handoff once this number reaches a preset threshold.

Signed-off-by: Alex Kogan <[email protected]>
Reviewed-by: Steve Sistare <[email protected]>
---
 kernel/locking/qspinlock.c     |  3 +++
 kernel/locking/qspinlock_cna.h | 30 +++++++++++++++++++++++++++---
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 6d8c4a52e44e..1d0d884308ef 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -597,6 +597,9 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
#if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS)
#define _GEN_CNA_LOCK_SLOWPATH

+#undef pv_init_node
+#define pv_init_node cna_init_node
+
#undef pv_wait_head_or_lock
#define pv_wait_head_or_lock cna_pre_scan

diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index 4d095f742d31..b92a6f9a19db 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -50,9 +50,19 @@ struct cna_node {
	struct mcs_spinlock	mcs;
	int			numa_node;
	u32			encoded_tail;
-	u32			pre_scan_result; /* 0 or an encoded tail */
+	u32			pre_scan_result; /* 0, 1 or an encoded tail */
+	u32			intra_count;
};

+/*
+ * Controls the threshold for the number of intra-node lock handoffs. It can
+ * be tuned and may depend, e.g., on the number of CPUs per node. For now,
+ * choose a value that provides reasonable long-term fairness without
+ * sacrificing performance compared to a version that does not have any
+ * fairness guarantees.
+ */
+#define INTRA_NODE_HANDOFF_THRESHOLD (1 << 16)
+
static void __init cna_init_nodes_per_cpu(unsigned int cpu)
{
	struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu);
@@ -86,6 +96,11 @@ static void __init cna_init_nodes(void)
}
early_initcall(cna_init_nodes);

+static __always_inline void cna_init_node(struct mcs_spinlock *node)
+{
+	((struct cna_node *)node)->intra_count = 0;
+}
+
static inline bool cna_try_change_tail(struct qspinlock *lock, u32 val,
				       struct mcs_spinlock *node)
{
@@ -215,7 +230,13 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
{
	struct cna_node *cn = (struct cna_node *)node;

-	cn->pre_scan_result = cna_scan_main_queue(node, node);
+	/*
+	 * Setting @pre_scan_result to 1 indicates that no post-scan
+	 * should be performed in cna_pass_lock().
+	 */
+	cn->pre_scan_result =
+		cn->intra_count == INTRA_NODE_HANDOFF_THRESHOLD ?
+			1 : cna_scan_main_queue(node, node);

	return 0;
}
@@ -234,7 +255,7 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
	 * pre-scan, and if so, try to find it in post-scan starting from the
	 * node where pre-scan stopped (stored in @pre_scan_result)
	 */
-	if (scan > 0)
+	if (scan > 1)
		scan = cna_scan_main_queue(node, decode_tail(scan));

	if (!scan) {	/* if found a successor from the same numa node */
@@ -245,6 +266,9 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
		 * to pass the lock
		 */
		val = node->locked + (node->locked == 0);
+		/* inc @intra_count if the secondary queue is not empty */
+		((struct cna_node *)next_holder)->intra_count =
+			cn->intra_count + (node->locked > 1);
	} else if (node->locked > 1) {	/* if secondary queue is not empty */
		/* next holder will be the first node in the secondary queue */
		tail_2nd = decode_tail(node->locked);
--
2.11.0 (Apple Git-81)
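
To make the scheme easier to follow outside the qspinlock slowpath, below is a
minimal user-space sketch of the handoff counting this patch adds. It models
only the threshold bookkeeping, not the kernel code: the names cna_model_node,
pre_scan() and pass_lock() are illustrative stand-ins, and the queue scan is
reduced to the assumption that a same-node successor always exists.

#include <stdio.h>

#define INTRA_NODE_HANDOFF_THRESHOLD (1 << 16)

struct cna_model_node {
	int numa_node;            /* NUMA node of this waiter */
	unsigned int intra_count; /* same-node handoffs seen so far */
};

/*
 * Mirrors cna_pre_scan(): a result of 1 means "skip the scan for a
 * same-node successor; the lock must go to the main-queue head".
 */
static unsigned int pre_scan(const struct cna_model_node *holder)
{
	return holder->intra_count == INTRA_NODE_HANDOFF_THRESHOLD ? 1 : 0;
}

/*
 * Mirrors the cna_pass_lock() bookkeeping: a same-node successor
 * inherits an incremented count while other nodes are still waiting
 * (secondary queue non-empty); an inter-node successor starts at 0,
 * as cna_init_node() arranges in the patch.
 */
static void pass_lock(struct cna_model_node *holder,
		      struct cna_model_node *next,
		      int other_nodes_waiting)
{
	if (pre_scan(holder) == 0 && next->numa_node == holder->numa_node)
		next->intra_count = holder->intra_count +
				    (other_nodes_waiting ? 1 : 0);
	else
		next->intra_count = 0;
}

int main(void)
{
	struct cna_model_node holder = { .numa_node = 0, .intra_count = 0 };
	struct cna_model_node next = { .numa_node = 0, .intra_count = 0 };
	unsigned long handoffs = 0;

	/* Two node-0 waiters trade the lock while node 1 waits. */
	while (pre_scan(&holder) == 0) {
		pass_lock(&holder, &next, 1);
		holder = next;
		handoffs++;
	}
	printf("same-node handoffs before yielding: %lu\n", handoffs);
	return 0;
}

Running the sketch prints 65536: the lock is traded within node 0 exactly
INTRA_NODE_HANDOFF_THRESHOLD times before the pre-scan result of 1 forces it
across to the waiting node, where the count restarts at zero.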


2019-10-19 08:41:21

by Waiman Long

Subject: Re: [PATCH v5 4/5] locking/qspinlock: Introduce starvation avoidance into CNA

On 10/16/19 12:29 AM, Alex Kogan wrote:
> Keep track of the number of intra-node lock handoffs, and force
> inter-node handoff once this number reaches a preset threshold.
>
[...]
> +/*
> + * Controls the threshold for the number of intra-node lock handoffs. It can
> + * be tuned and may depend, e.g., on the number of CPUs per node. For now,
> + * choose a value that provides reasonable long-term fairness without
> + * sacrificing performance compared to a version that does not have any
> + * fairness guarantees.
> + */
> +#define INTRA_NODE_HANDOFF_THRESHOLD (1 << 16)

I think 64k is too high. I would be more comfortable with a number like
(1 << 8). The worst-case latency for a lock waiter from another node
is just not acceptable.

Cheers,
Longman
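
To put rough numbers on that objection (assuming, purely for illustration,
about 1 us per handoff including the critical section; real figures depend on
the workload and hardware):

    worst-case wait for a remote waiter ~= threshold * time per handoff
    (1 << 16) * 1 us ~= 65.5 ms
    (1 << 8)  * 1 us ~= 0.26 ms

Under these assumptions the posted 64k threshold admits delays in the tens of
milliseconds, while (1 << 8) keeps the worst case sub-millisecond.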