Postorder iteration yields all of a node's descendants before yielding the
node itself, and this particular implementation also avoids examining a node's
child links after that node has been yielded.
In what I expect will be its most common usage, postorder iteration allows the
deletion of every node in an rbtree without modifying the rbtree nodes (no
_requirement_ that they be nulled) while avoiding referencing child nodes after
they have been "deleted" (most commonly, freed).
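For example, with the helper added in patch 2, freeing every entry in a tree
reduces to something like this (a minimal sketch; "struct foo", its "rbnode"
field, and free_foo() are hypothetical stand-ins):

	struct foo *entry, *n;

	rbtree_postorder_for_each_entry_safe(entry, n, &root, rbnode)
		free_foo(entry);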
So far I have only updated zswap to use this functionality, but numerous
other bits of code (most notably in the filesystem drivers) use a hand-rolled
postorder iteration that NULLs child links as it traverses the tree. Each of
those instances could be replaced with this common implementation.
Patches 1 & 2 add the rbtree postorder iteration functions.
Patch 3 adds a test of the iteration to the rbtree runtime tests.
Patch 4 allows building the rbtree runtime tests as builtins.
Patch 5 updates zswap.
--
since v1:
- spacing
- s/it's/its/
- remove now unused var in zswap code.
- Reviewed-by: Seth Jennings <[email protected]>
Cody P Schafer (5):
rbtree: add postorder iteration functions
rbtree: add rbtree_postorder_for_each_entry_safe() helper
rbtree_test: add test for postorder iteration
rbtree: allow tests to run as builtin
mm/zswap: use postorder iteration when destroying rbtree
include/linux/rbtree.h | 22 ++++++++++++++++++++++
lib/Kconfig.debug | 2 +-
lib/rbtree.c | 40 ++++++++++++++++++++++++++++++++++++++++
lib/rbtree_test.c | 12 ++++++++++++
mm/zswap.c | 16 ++--------------
5 files changed, 77 insertions(+), 15 deletions(-)
--
1.8.3.4
Add postorder iteration functions for rbtree. These are useful for
safely freeing an entire rbtree without modifying the tree at all.
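For example, a bare postorder walk looks like this (a sketch; "root" is
assumed to be a struct rb_root):

	struct rb_node *node;

	for (node = rb_first_postorder(&root); node;
	     node = rb_next_postorder(node))
		; /* all of node's descendants have already been visited */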
Signed-off-by: Cody P Schafer <[email protected]>
Reviewed-by: Seth Jennings <[email protected]>
---
include/linux/rbtree.h | 4 ++++
lib/rbtree.c | 40 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+)
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1b..c467151 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c0e31fe..65f4eff 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -518,3 +518,43 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+ for (;;) {
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+ else
+ return (struct rb_node *)node;
+ }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+ const struct rb_node *parent;
+ if (!node)
+ return NULL;
+ parent = rb_parent(node);
+
+ /* If we're sitting on node, we've already seen our children */
+ if (parent && node == parent->rb_left && parent->rb_right) {
+ /* If we are the parent's left node, go to the parent's right
+ * node then all the way down to the left */
+ return rb_left_deepest_node(parent->rb_right);
+ } else
+ /* Otherwise we are the parent's right node, and the parent
+ * should be next */
+ return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+ if (!root->rb_node)
+ return NULL;
+
+ return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
--
1.8.3.4
Because deleting an entire tree is a relatively common use of rbtree
postorder iteration, and because doing it safely means fiddling with
temporary storage, provide a helper to simplify safe postorder rbtree
iteration.
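Without the helper, every user has to open-code the temporary storage along
these lines (a sketch; "struct foo", its "rbnode" field, and free_foo() are
hypothetical stand-ins):

	struct rb_node *node = rb_first_postorder(&root);

	while (node) {
		struct rb_node *next = rb_next_postorder(node);

		free_foo(rb_entry(node, struct foo, rbnode));
		node = next;
	}

The helper folds this cursor bookkeeping into a single for-loop statement.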
Signed-off-by: Cody P Schafer <[email protected]>
Reviewed-by: Seth Jennings <[email protected]>
---
include/linux/rbtree.h | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index c467151..aa870a4 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -85,4 +85,22 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+ n = rb_entry(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); \
+ &pos->field; \
+ pos = n, \
+ n = rb_entry(rb_next_postorder(&pos->field), \
+ typeof(*pos), field))
+
#endif /* _LINUX_RBTREE_H */
--
1.8.3.4
Just check that the postorder iteration examines all nodes in the tree.
Signed-off-by: Cody P Schafer <[email protected]>
Reviewed-by: Seth Jennings <[email protected]>
---
lib/rbtree_test.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 122f02f..31dd4cc 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb)
return count;
}
+static void check_postorder(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0;
+ for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
+ count++;
+
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
static void check(int nr_nodes)
{
struct rb_node *rb;
@@ -136,6 +146,8 @@ static void check(int nr_nodes)
WARN_ON_ONCE(count != nr_nodes);
WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
+
+ check_postorder(nr_nodes);
}
static void check_augmented(int nr_nodes)
--
1.8.3.4
Use the new postorder iteration helper when destroying the zswap rbtree.
Since the whole tree is being freed, there is no need to rebalance it (via
rb_erase()) after every removal, which is exactly what the removed TODO
comment asked for.

Signed-off-by: Cody P Schafer <[email protected]>
Reviewed-by: Seth Jennings <[email protected]>
---
mm/zswap.c | 16 ++--------------
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index deda2b6..5c853b2 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
static void zswap_frontswap_invalidate_area(unsigned type)
{
struct zswap_tree *tree = zswap_trees[type];
- struct rb_node *node;
- struct zswap_entry *entry;
+ struct zswap_entry *entry, *n;
if (!tree)
return;
/* walk the tree and free everything */
spin_lock(&tree->lock);
- /*
- * TODO: Even though this code should not be executed because
- * the try_to_unuse() in swapoff should have emptied the tree,
- * it is very wasteful to rebalance the tree after every
- * removal when we are freeing the whole tree.
- *
- * If post-order traversal code is ever added to the rbtree
- * implementation, it should be used here.
- */
- while ((node = rb_first(&tree->rbroot))) {
- entry = rb_entry(node, struct zswap_entry, rbnode);
- rb_erase(&entry->rbnode, &tree->rbroot);
+ rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
zbud_free(tree->pool, entry->handle);
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
--
1.8.3.4
There is no reason to require the rbtree test code to be a module; allow it
to be built in (this streamlines my development process).
Signed-off-by: Cody P Schafer <[email protected]>
Reviewed-by: Seth Jennings <[email protected]>
---
lib/Kconfig.debug | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1501aa5..606e3c8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1442,7 +1442,7 @@ config BACKTRACE_SELF_TEST
config RBTREE_TEST
tristate "Red-Black tree test"
- depends on m && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
A benchmark measuring the performance of the rbtree library.
Also includes rbtree invariant checks.
--
1.8.3.4