Separate the call to mas_destroy() from mas_nomem() so that we can check
for no-memory errors without destroying the current maple state in
mas_store_gfp(). Then add calls to mas_destroy() to the callers of
mas_nomem().
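
The resulting caller pattern is roughly the following (illustrative
sketch of the tail of mtree_store_range() after this change; see the
actual hunks below for the exact code):

retry:
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);

	/* previously done by mas_nomem() on the success path */
	mas_destroy(&mas);
	return ret;
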
Signed-off-by: Sidhartha Kumar <[email protected]>
---
lib/maple_tree.c | 39 ++++++++++++++++++++------------
tools/testing/radix-tree/maple.c | 11 +++++----
2 files changed, 31 insertions(+), 19 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3780d4bb0415..f1496817e52a 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4526,6 +4526,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
if (*next == 0)
mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+ mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
@@ -5606,18 +5607,22 @@ EXPORT_SYMBOL_GPL(mas_store);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
+ int ret;
- mas_wr_store_setup(&wr_mas);
- trace_ma_write(__func__, mas, 0, entry);
retry:
- mas_wr_store_entry(&wr_mas);
+ mas_wr_preallocate(&wr_mas, entry, gfp);
+ WARN_ON_ONCE(mas->store_type == wr_invalid);
+
if (unlikely(mas_nomem(mas, gfp)))
goto retry;
+ if (mas_is_err(mas))
+ goto out;
- if (unlikely(mas_is_err(mas)))
- return xa_err(mas->node);
-
- return 0;
+ mas_wr_store_entry(&wr_mas);
+out:
+ ret = xa_err(mas->node);
+ mas_destroy(mas);
+ return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
@@ -6365,6 +6370,7 @@ void *mas_erase(struct ma_state *mas)
if (mas_nomem(mas, GFP_KERNEL))
goto write_retry;
+ mas_destroy(mas);
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
@@ -6379,10 +6385,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
- if (likely(mas->node != MA_ERROR(-ENOMEM))) {
- mas_destroy(mas);
+ if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
- }
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
@@ -6460,6 +6464,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
{
MA_STATE(mas, mt, index, last);
MA_WR_STATE(wr_mas, &mas, entry);
+ int ret = 0;
trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
@@ -6475,10 +6480,12 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
goto retry;
mtree_unlock(mt);
+
if (mas_is_err(&mas))
- return xa_err(mas.node);
+ ret = xa_err(mas.node);
- return 0;
+ mas_destroy(&mas);
+ return ret;
}
EXPORT_SYMBOL(mtree_store_range);
@@ -6514,6 +6521,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
+ int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -6529,9 +6537,10 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
mtree_unlock(mt);
if (mas_is_err(&ms))
- return xa_err(ms.node);
+ ret = xa_err(ms.node);
- return 0;
+ mas_destroy(&ms);
+ return ret;
}
EXPORT_SYMBOL(mtree_insert_range);
@@ -6586,6 +6595,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
@@ -6667,6 +6677,7 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index c57979de1576..c834e91e6810 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -119,7 +119,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
mas_push_node(&mas, mn);
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* free */
+ mas_destroy(&mas);
mtree_unlock(mt);
@@ -143,7 +143,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mas.status = ma_start;
- mas_nomem(&mas, GFP_KERNEL);
+ mas_destroy(&mas);
/* Allocate 3 nodes, will fail. */
mas_node_count(&mas, 3);
/* Drop the lock and allocate 3 nodes. */
@@ -160,7 +160,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != 3);
/* Free. */
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL);
+ mas_destroy(&mas);
/* Set allocation request to 1. */
mas_set_alloc_req(&mas, 1);
@@ -275,7 +275,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
}
mas_reset(&mas);
- MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+ mas_destroy(&mas);
}
@@ -298,7 +298,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
}
MT_BUG_ON(mt, mas_allocated(&mas) != total);
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* Free. */
+ mas_destroy(&mas); /* Free. */
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
for (i = 1; i < 128; i++) {
@@ -35846,6 +35846,7 @@ static noinline void __init check_nomem(struct maple_tree *mt)
mas_store(&ms, &ms); /* insert 1 -> &ms */
mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
mtree_unlock(mt);
+ mas_destroy(&ms);
mtree_destroy(mt);
}
--
2.45.1
* Sidhartha Kumar <[email protected]> [240604 13:42]:
> Separate the call to mas_destroy() from mas_nomem() so that we can check
> for no-memory errors without destroying the current maple state in
> mas_store_gfp(). Then add calls to mas_destroy() to the callers of
> mas_nomem().
>
> Signed-off-by: Sidhartha Kumar <[email protected]>
> ---
> lib/maple_tree.c | 39 ++++++++++++++++++++------------
> tools/testing/radix-tree/maple.c | 11 +++++----
> 2 files changed, 31 insertions(+), 19 deletions(-)
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 3780d4bb0415..f1496817e52a 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4526,6 +4526,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
> if (*next == 0)
> mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
>
> + mas_destroy(mas);
> return ret;
> }
> EXPORT_SYMBOL(mas_alloc_cyclic);
> @@ -5606,18 +5607,22 @@ EXPORT_SYMBOL_GPL(mas_store);
> int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
> {
> MA_WR_STATE(wr_mas, mas, entry);
> + int ret;
>
> - mas_wr_store_setup(&wr_mas);
> - trace_ma_write(__func__, mas, 0, entry);
> retry:
> - mas_wr_store_entry(&wr_mas);
> + mas_wr_preallocate(&wr_mas, entry, gfp);
> + WARN_ON_ONCE(mas->store_type == wr_invalid);
> +
> if (unlikely(mas_nomem(mas, gfp)))
> goto retry;
Nit: missing blank line here.
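That is, something like this, with the blank line after the retry check:

	if (unlikely(mas_nomem(mas, gfp)))
		goto retry;

	if (mas_is_err(mas))
		goto out;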
> + if (mas_is_err(mas))
> + goto out;
>
> - if (unlikely(mas_is_err(mas)))
> - return xa_err(mas->node);
> -
> - return 0;
> + mas_wr_store_entry(&wr_mas);
> +out:
> + ret = xa_err(mas->node);
Looking at what xa_err() does, it would probably be wise to only assign
ret from xa_err() when mas_is_err() is true. You already do that in the
other callers, so I'm not sure why this one is special.
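Something like this (untested) would keep it consistent with the other
callers, with int ret initialised to 0 at the top:

	mas_wr_store_entry(&wr_mas);
out:
	if (mas_is_err(mas))
		ret = xa_err(mas->node);

	mas_destroy(mas);
	return ret;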
> + mas_destroy(mas);
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mas_store_gfp);
>
> @@ -6365,6 +6370,7 @@ void *mas_erase(struct ma_state *mas)
> if (mas_nomem(mas, GFP_KERNEL))
> goto write_retry;
>
> + mas_destroy(mas);
> return entry;
> }
> EXPORT_SYMBOL_GPL(mas_erase);
> @@ -6379,10 +6385,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
> bool mas_nomem(struct ma_state *mas, gfp_t gfp)
> __must_hold(mas->tree->ma_lock)
> {
> - if (likely(mas->node != MA_ERROR(-ENOMEM))) {
> - mas_destroy(mas);
> + if (likely(mas->node != MA_ERROR(-ENOMEM)))
> return false;
> - }
>
> if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
> mtree_unlock(mas->tree);
> @@ -6460,6 +6464,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
> {
> MA_STATE(mas, mt, index, last);
> MA_WR_STATE(wr_mas, &mas, entry);
> + int ret = 0;
>
> trace_ma_write(__func__, &mas, 0, entry);
> if (WARN_ON_ONCE(xa_is_advanced(entry)))
> @@ -6475,10 +6480,12 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
> goto retry;
>
> mtree_unlock(mt);
> +
> if (mas_is_err(&mas))
> - return xa_err(mas.node);
> + ret = xa_err(mas.node);
>
> - return 0;
> + mas_destroy(&mas);
> + return ret;
> }
> EXPORT_SYMBOL(mtree_store_range);
>
> @@ -6514,6 +6521,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
> unsigned long last, void *entry, gfp_t gfp)
> {
> MA_STATE(ms, mt, first, last);
> + int ret = 0;
>
> if (WARN_ON_ONCE(xa_is_advanced(entry)))
> return -EINVAL;
> @@ -6529,9 +6537,10 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
>
> mtree_unlock(mt);
> if (mas_is_err(&ms))
> - return xa_err(ms.node);
> + ret = xa_err(ms.node);
>
> - return 0;
> + mas_destroy(&ms);
> + return ret;
> }
> EXPORT_SYMBOL(mtree_insert_range);
>
> @@ -6586,6 +6595,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
>
> unlock:
> mtree_unlock(mt);
> + mas_destroy(&mas);
> return ret;
> }
> EXPORT_SYMBOL(mtree_alloc_range);
> @@ -6667,6 +6677,7 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
>
> unlock:
> mtree_unlock(mt);
> + mas_destroy(&mas);
> return ret;
> }
> EXPORT_SYMBOL(mtree_alloc_rrange);
> diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
> index c57979de1576..c834e91e6810 100644
> --- a/tools/testing/radix-tree/maple.c
> +++ b/tools/testing/radix-tree/maple.c
> @@ -119,7 +119,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
> MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
> mas_push_node(&mas, mn);
> mas_reset(&mas);
> - mas_nomem(&mas, GFP_KERNEL); /* free */
> + mas_destroy(&mas);
> mtree_unlock(mt);
>
>
> @@ -143,7 +143,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
> mn->parent = ma_parent_ptr(mn);
> ma_free_rcu(mn);
> mas.status = ma_start;
> - mas_nomem(&mas, GFP_KERNEL);
> + mas_destroy(&mas);
> /* Allocate 3 nodes, will fail. */
> mas_node_count(&mas, 3);
> /* Drop the lock and allocate 3 nodes. */
> @@ -160,7 +160,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
> MT_BUG_ON(mt, mas_allocated(&mas) != 3);
> /* Free. */
> mas_reset(&mas);
> - mas_nomem(&mas, GFP_KERNEL);
> + mas_destroy(&mas);
>
> /* Set allocation request to 1. */
> mas_set_alloc_req(&mas, 1);
> @@ -275,7 +275,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
> MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
> }
> mas_reset(&mas);
> - MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
> + mas_destroy(&mas);
This was asserting that mas_nomem() returns false with an MT_BUG_ON(),
and that check has been dropped. Can we keep an equivalent check another
way?
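For instance (untested), keep the existing check and add the explicit
free that mas_nomem() no longer does:

	mas_reset(&mas);
	MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
	mas_destroy(&mas);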
>
> }
>
> @@ -298,7 +298,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
> }
> MT_BUG_ON(mt, mas_allocated(&mas) != total);
> mas_reset(&mas);
> - mas_nomem(&mas, GFP_KERNEL); /* Free. */
> + mas_destroy(&mas); /* Free. */
>
> MT_BUG_ON(mt, mas_allocated(&mas) != 0);
> for (i = 1; i < 128; i++) {
> @@ -35846,6 +35846,7 @@ static noinline void __init check_nomem(struct maple_tree *mt)
> mas_store(&ms, &ms); /* insert 1 -> &ms */
> mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
> mtree_unlock(mt);
> + mas_destroy(&ms);
> mtree_destroy(mt);
> }
>
> --
> 2.45.1
>