2023-10-12 15:53:50

by Liam R. Howlett

[permalink] [raw]
Subject: [PATCH v2] maple_tree: Add GFP_KERNEL to allocations in mas_expected_entries()

Users complained about OOM errors during fork without triggering
compaction. This can be fixed by modifying the flags used in
mas_expected_entries() so that the compaction will be triggered in low
memory situations. Since mas_expected_entries() is only used during
fork, the extra argument does not need to be passed through.

Additionally, the two test_maple_tree test cases and one benchmark test
were altered to use the correct locking type so that allocations would
not trigger sleeping and thus fail. Testing was completed with lockdep
atomic sleep detection.

The additional locking change requires rwsem support additions to the
tools/ directory through the use of the pthreads pthread_rwlock_t type.
With this change test_maple_tree works in userspace, as a module, and
in-kernel.

Users may notice that the system gave up early on attempting to start
new processes instead of attempting to reclaim memory.

Link: https://lkml.kernel.org/r/20230915093243epcms1p46fa00bbac1ab7b7dca94acb66c44c456@epcms1p4
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Cc: <[email protected]>
Cc: [email protected]
Cc: Peng Zhang <[email protected]>
Signed-off-by: Liam R. Howlett <[email protected]>
---

v1: https://lore.kernel.org/linux-mm/[email protected]/

Changes in v2:
- Updated benchmarking to use the rw_semaphore lock (not strictly necessary)
- Updated check_forking() to use the rw_semaphore lock.
- Testing now completed with lockdep atomic sleep checking.

lib/maple_tree.c | 2 +-
lib/test_maple_tree.c | 35 ++++++++++++++++++++++----------
tools/include/linux/rwsem.h | 40 +++++++++++++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 12 deletions(-)
create mode 100644 tools/include/linux/rwsem.h

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 0e00a84e8e8f..bb24d84a4922 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
- mas_node_count(mas, nr_nodes + 3);
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);

/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 06959165e2f9..464eeb90d5ad 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -9,6 +9,7 @@

#include <linux/maple_tree.h>
#include <linux/module.h>
+#include <linux/rwsem.h>

#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);

for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);

mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
mas.index = 0;
mas.last = 0;
if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}

static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);

for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas.index = 0;
mas.last = 0;
rcu_read_lock();
- mas_lock(&newmas);
+ down_write(&newmt_lock);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas_store(&newmas, val);
}
mas_destroy(&newmas);
- mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
}
#endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
void *tmp;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);

if (!zero_start)
i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);

- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_non_kernel(99999);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
ret = mas_expected_entries(&newmas, nr_entries);
mt_set_non_kernel(0);
MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);

- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}

/* Duplicate many sizes of trees. Mainly to test expected entry values */
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644
index 000000000000..83971b3cbfce
--- /dev/null
+++ b/tools/include/linux/rwsem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+ pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS__RWSEM_H */
--
2.40.1


2023-10-13 09:27:26

by Peng Zhang

[permalink] [raw]
Subject: Re: [PATCH v2] maple_tree: Add GFP_KERNEL to allocations in mas_expected_entries()



在 2023/10/12 23:52, Liam R. Howlett 写道:
> Users complained about OOM errors during fork without triggering
> compaction. This can be fixed by modifying the flags used in
> mas_expected_entries() so that the compaction will be triggered in low
> memory situations. Since mas_expected_entries() is only used during
> fork, the extra argument does not need to be passed through.
>
> Additionally, the two test_maple_tree test cases and one benchmark test
> were altered to use the correct locking type so that allocations would
> not trigger sleeping and thus fail. Testing was completed with lockdep
> atomic sleep detection.
>
> The additional locking change requires rwsem support additions to the
> tools/ directory through the use of pthreads pthread_rwlock_t. With
> this change test_maple_tree works in userspace, as a module, and
> in-kernel.
>
> Users may notice that the system gave up early on attempting to start
> new processes instead of attempting to reclaim memory.
>
> Link: https://lkml.kernel.org/r/20230915093243epcms1p46fa00bbac1ab7b7dca94acb66c44c456@epcms1p4
> Fixes: 54a611b60590 ("Maple Tree: add new data structure")
> Cc: <[email protected]>
> Cc: [email protected]
> Cc: Peng Zhang <[email protected]>
> Signed-off-by: Liam R. Howlett <[email protected]>
Reviewed-by: Peng Zhang <[email protected]>
> ---
>
> v1: https://lore.kernel.org/linux-mm/[email protected]/
>
> Changes in v2:
> - Updated benchmarking to use the rw_semaphore lock (not strictly necessary)
> - Updated check_forking() to use the rw_semaphore lock.
> - Testing now completed with lockdep atomic sleep checking.
>
> lib/maple_tree.c | 2 +-
> lib/test_maple_tree.c | 35 ++++++++++++++++++++++----------
> tools/include/linux/rwsem.h | 40 +++++++++++++++++++++++++++++++++++++
> 3 files changed, 65 insertions(+), 12 deletions(-)
> create mode 100644 tools/include/linux/rwsem.h
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 0e00a84e8e8f..bb24d84a4922 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
> /* Internal nodes */
> nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
> /* Add working room for split (2 nodes) + new parents */
> - mas_node_count(mas, nr_nodes + 3);
> + mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
>
> /* Detect if allocations run out */
> mas->mas_flags |= MA_STATE_PREALLOC;
> diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
> index 06959165e2f9..464eeb90d5ad 100644
> --- a/lib/test_maple_tree.c
> +++ b/lib/test_maple_tree.c
> @@ -9,6 +9,7 @@
>
> #include <linux/maple_tree.h>
> #include <linux/module.h>
> +#include <linux/rwsem.h>
>
> #define MTREE_ALLOC_MAX 0x2000000000000Ul
> #define CONFIG_MAPLE_SEARCH
> @@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
> void *val;
> MA_STATE(mas, mt, 0, 0);
> MA_STATE(newmas, mt, 0, 0);
> + struct rw_semaphore newmt_lock;
> +
> + init_rwsem(&newmt_lock);
>
> for (i = 0; i <= nr_entries; i++)
> mtree_store_range(mt, i*10, i*10 + 5,
> xa_mk_value(i), GFP_KERNEL);
>
> mt_set_non_kernel(99999);
> - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
> + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
> + mt_set_external_lock(&newmt, &newmt_lock);
> newmas.tree = &newmt;
> mas_reset(&newmas);
> mas_reset(&mas);
> - mas_lock(&newmas);
> + down_write(&newmt_lock);
> mas.index = 0;
> mas.last = 0;
> if (mas_expected_entries(&newmas, nr_entries)) {
> @@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
> }
> rcu_read_unlock();
> mas_destroy(&newmas);
> - mas_unlock(&newmas);
> mt_validate(&newmt);
> mt_set_non_kernel(0);
> - mtree_destroy(&newmt);
> + __mt_destroy(&newmt);
> + up_write(&newmt_lock);
> }
>
> static noinline void __init check_iteration(struct maple_tree *mt)
> @@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
> void *val;
> MA_STATE(mas, mt, 0, 0);
> MA_STATE(newmas, mt, 0, 0);
> + struct rw_semaphore newmt_lock;
> +
> + init_rwsem(&newmt_lock);
> + mt_set_external_lock(&newmt, &newmt_lock);
>
> for (i = 0; i <= nr_entries; i++)
> mtree_store_range(mt, i*10, i*10 + 5,
> @@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
> mas.index = 0;
> mas.last = 0;
> rcu_read_lock();
> - mas_lock(&newmas);
> + down_write(&newmt_lock);
> if (mas_expected_entries(&newmas, nr_entries)) {
> printk("OOM!");
> BUG_ON(1);
> @@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
> mas_store(&newmas, val);
> }
> mas_destroy(&newmas);
> - mas_unlock(&newmas);
> rcu_read_unlock();
> mt_validate(&newmt);
> mt_set_non_kernel(0);
> - mtree_destroy(&newmt);
> + __mt_destroy(&newmt);
> + up_write(&newmt_lock);
> }
> }
> #endif
> @@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
> void *tmp;
> MA_STATE(mas, mt, 0, 0);
> MA_STATE(newmas, &newmt, 0, 0);
> + struct rw_semaphore newmt_lock;
> +
> + init_rwsem(&newmt_lock);
> + mt_set_external_lock(&newmt, &newmt_lock);
>
> if (!zero_start)
> i = 1;
> @@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
> mtree_store_range(mt, i*10, (i+1)*10 - gap,
> xa_mk_value(i), GFP_KERNEL);
>
> - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
> + mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
> mt_set_non_kernel(99999);
> - mas_lock(&newmas);
> + down_write(&newmt_lock);
> ret = mas_expected_entries(&newmas, nr_entries);
> mt_set_non_kernel(0);
> MT_BUG_ON(mt, ret != 0);
> @@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
> }
> rcu_read_unlock();
> mas_destroy(&newmas);
> - mas_unlock(&newmas);
>
> - mtree_destroy(&newmt);
> + __mt_destroy(&newmt);
> + up_write(&newmt_lock);
> }
>
> /* Duplicate many sizes of trees. Mainly to test expected entry values */
> diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
> new file mode 100644
> index 000000000000..83971b3cbfce
> --- /dev/null
> +++ b/tools/include/linux/rwsem.h
> @@ -0,0 +1,40 @@
> +/* SPDX-License-Identifier: GPL-2.0+ */
> +#ifndef _TOOLS__RWSEM_H
> +#define _TOOLS__RWSEM_H
> +
> +#include <pthread.h>
> +
> +struct rw_semaphore {
> + pthread_rwlock_t lock;
> +};
> +
> +static inline int init_rwsem(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_init(&sem->lock, NULL);
> +}
> +
> +static inline int exit_rwsem(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_destroy(&sem->lock);
> +}
> +
> +static inline int down_read(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_rdlock(&sem->lock);
> +}
> +
> +static inline int up_read(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_unlock(&sem->lock);
> +}
> +
> +static inline int down_write(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_wrlock(&sem->lock);
> +}
> +
> +static inline int up_write(struct rw_semaphore *sem)
> +{
> + return pthread_rwlock_unlock(&sem->lock);
> +}
> +#endif /* _TOOLS__RWSEM_H */