Hi,
This series of patches is to reduce the size of struct vmap_area.
Since the members of struct vmap_area are not being used at the same time,
it is possible to reduce its size by placing several members that are not
used at the same time in a union.
The first 4 patches did some preparatory work for this and improved
readability.
The fifth patch is the main patch, it did the work of rewriting vmap_area.
More details can be obtained from the commit message.
Thanks,
Pengfei
Pengfei Li (5):
mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
mm/vmalloc.c: Introduce a wrapper function of
insert_vmap_area_augment()
mm/vmalloc.c: Rename function __find_vmap_area() for readability
mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
include/linux/vmalloc.h | 28 +++++---
mm/vmalloc.c | 144 +++++++++++++++++++++++++++-------------
2 files changed, 117 insertions(+), 55 deletions(-)
--
2.21.0
Rename function __find_vmap_area to __search_va_from_busy_tree to
indicate that it is searching in the *BUSY* tree.
Signed-off-by: Pengfei Li <[email protected]>
---
mm/vmalloc.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a5065fcb74d3..1beb5bcfb450 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -399,7 +399,7 @@ static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);
-static struct vmap_area *__find_vmap_area(unsigned long addr)
+static struct vmap_area *__search_va_from_busy_tree(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -1313,7 +1313,7 @@ static struct vmap_area *find_vmap_area(unsigned long addr)
struct vmap_area *va;
spin_lock(&vmap_area_lock);
- va = __find_vmap_area(addr);
+ va = __search_va_from_busy_tree(addr);
spin_unlock(&vmap_area_lock);
return va;
--
2.21.0
Since function merge_or_add_vmap_area() is only used to
merge or add a vmap area to the *FREE* tree, rename it
to merge_or_add_va_to_free_tree.
It is then obvious that merge_or_add_vmap_area() does not
need the parameters root and head, so remove them.
Signed-off-by: Pengfei Li <[email protected]>
---
mm/vmalloc.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1beb5bcfb450..4148d6fdfb6d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -688,8 +688,7 @@ insert_va_to_free_tree(struct vmap_area *va, struct rb_node *from)
* freed.
*/
static __always_inline void
-merge_or_add_vmap_area(struct vmap_area *va,
- struct rb_root *root, struct list_head *head)
+merge_or_add_va_to_free_tree(struct vmap_area *va)
{
struct vmap_area *sibling;
struct list_head *next;
@@ -701,7 +700,7 @@ merge_or_add_vmap_area(struct vmap_area *va,
* Find a place in the tree where VA potentially will be
* inserted, unless it is merged with its sibling/siblings.
*/
- link = find_va_links(va, root, NULL, &parent);
+ link = find_va_links(va, &free_vmap_area_root, NULL, &parent);
/*
* Get next node of VA to check if merging can be done.
@@ -717,7 +716,7 @@ merge_or_add_vmap_area(struct vmap_area *va,
* | |
* start end
*/
- if (next != head) {
+ if (next != &free_vmap_area_list) {
sibling = list_entry(next, struct vmap_area, list);
if (sibling->va_start == va->va_end) {
sibling->va_start = va->va_start;
@@ -725,9 +724,6 @@ merge_or_add_vmap_area(struct vmap_area *va,
/* Check and update the tree if needed. */
augment_tree_propagate_from(sibling);
- /* Remove this VA, it has been merged. */
- unlink_va(va, root);
-
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
@@ -744,7 +740,7 @@ merge_or_add_vmap_area(struct vmap_area *va,
* | |
* start end
*/
- if (next->prev != head) {
+ if (next->prev != &free_vmap_area_list) {
sibling = list_entry(next->prev, struct vmap_area, list);
if (sibling->va_end == va->va_start) {
sibling->va_end = va->va_end;
@@ -753,7 +749,8 @@ merge_or_add_vmap_area(struct vmap_area *va,
augment_tree_propagate_from(sibling);
/* Remove this VA, it has been merged. */
- unlink_va(va, root);
+ if (merged)
+ unlink_va(va, &free_vmap_area_root);
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
@@ -764,7 +761,8 @@ merge_or_add_vmap_area(struct vmap_area *va,
insert:
if (!merged) {
- link_va(va, root, parent, link, head);
+ link_va(va, &free_vmap_area_root, parent, link,
+ &free_vmap_area_list);
augment_tree_propagate_from(va);
}
}
@@ -1141,8 +1139,7 @@ static void __free_vmap_area(struct vmap_area *va)
/*
* Merge VA with its neighbors, otherwise just add it.
*/
- merge_or_add_vmap_area(va,
- &free_vmap_area_root, &free_vmap_area_list);
+ merge_or_add_va_to_free_tree(va);
}
/*
--
2.21.0
Members of struct vmap_area are not being used at the same time.
For example,
(1)members @flags, @purge_list, and @vm are unused when vmap area is
in the *FREE* tree;
(2)members @subtree_max_size and @purge_list are unused when vmap area is
in the *BUSY* tree and not in the purge list;
(3)members @subtree_max_size and @vm are unused when vmap area is
in the *BUSY* tree and in the purge list.
Since members @subtree_max_size, @purge_list and @vm are not used
at the same time, they can be placed in a union to reduce the
size of struct vmap_area.
Besides, rename @flags to @_vm_valid to indicate whether @vm is valid.
The reason why @_vm_valid can be placed in a union with @va_start
is that if @vm is valid, then @va_start can be derived from @vm.
Signed-off-by: Pengfei Li <[email protected]>
---
include/linux/vmalloc.h | 28 ++++++++++----
mm/vmalloc.c | 85 +++++++++++++++++++++++++++++++----------
2 files changed, 85 insertions(+), 28 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 51e131245379..7b99de5ccbec 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -48,18 +48,30 @@ struct vm_struct {
};
struct vmap_area {
- unsigned long va_start;
+ union {
+ unsigned long va_start;
+ /*
+ * Determine whether vm is valid according to
+ * the value of _vm_valid
+ */
+ unsigned long _vm_valid;
+ };
+
unsigned long va_end;
- /*
- * Largest available free size in subtree.
- */
- unsigned long subtree_max_size;
- unsigned long flags;
+ union {
+ /* Only used when vmap area in *FREE* vmap_area tree */
+ unsigned long subtree_max_size;
+
+ /* Only used when vmap area in vmap_purge_list */
+ struct llist_node purge_list;
+
+ /* Only used when va_vm_is_valid() returns true */
+ struct vm_struct *vm;
+ };
+
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
- struct llist_node purge_list; /* "lazy purge" list */
- struct vm_struct *vm;
};
/*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4148d6fdfb6d..89b93ee0ec04 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -329,8 +329,28 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
-#define VM_LAZY_FREE 0x02
-#define VM_VM_AREA 0x04
+#define VA_VM_VALID 0x01UL
+
+static __always_inline void
+__va_set_vm(struct vmap_area *va, struct vm_struct *vm)
+{
+ /* Overwrite va->va_start by va->_vm_valid */
+ va->_vm_valid = VA_VM_VALID;
+ va->vm = vm;
+}
+
+static __always_inline void
+__va_unset_vm(struct vmap_area *va)
+{
+ /* Restore va->va_start and overwrite va->_vm_valid */
+ va->va_start = (unsigned long)va->vm->addr;
+}
+
+static __always_inline bool
+va_vm_is_valid(struct vmap_area *va)
+{
+ return (va->_vm_valid == VA_VM_VALID);
+}
static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
@@ -399,15 +419,26 @@ static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);
+/*
+ * Search the *BUSY* vmap_area tree. If va_vm_is_valid() returns true,
+ * then va->va_start has been overwritten by va->_vm_valid,
+ * otherwise va->va_start remains the original value.
+ */
static struct vmap_area *__search_va_from_busy_tree(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
while (n) {
struct vmap_area *va;
+ unsigned long start;
va = rb_entry(n, struct vmap_area, rb_node);
- if (addr < va->va_start)
+ if (va_vm_is_valid(va))
+ start = (unsigned long)va->vm->addr;
+ else
+ start = va->va_start;
+
+ if (addr < start)
n = n->rb_left;
else if (addr >= va->va_end)
n = n->rb_right;
@@ -429,8 +460,13 @@ find_va_links(struct vmap_area *va,
{
struct vmap_area *tmp_va;
struct rb_node **link;
+ unsigned long start;
+ bool is_busy_va_tree = false;
if (root) {
+ if (root == &vmap_area_root)
+ is_busy_va_tree = true;
+
link = &root->rb_node;
if (unlikely(!*link)) {
*parent = NULL;
@@ -447,6 +483,10 @@ find_va_links(struct vmap_area *va,
*/
do {
tmp_va = rb_entry(*link, struct vmap_area, rb_node);
+ if (is_busy_va_tree && va_vm_is_valid(tmp_va))
+ start = (unsigned long)tmp_va->vm->addr;
+ else
+ start = tmp_va->va_start;
/*
* During the traversal we also do some sanity check.
@@ -454,9 +494,9 @@ find_va_links(struct vmap_area *va,
* or full overlaps.
*/
if (va->va_start < tmp_va->va_end &&
- va->va_end <= tmp_va->va_start)
+ va->va_end <= start)
link = &(*link)->rb_left;
- else if (va->va_end > tmp_va->va_start &&
+ else if (va->va_end > start &&
va->va_start >= tmp_va->va_end)
link = &(*link)->rb_right;
else
@@ -1079,8 +1119,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
va->va_start = addr;
va->va_end = addr + size;
- va->flags = 0;
insert_va_to_busy_tree(va);
+ va->vm = NULL;
spin_unlock(&vmap_area_lock);
@@ -1872,11 +1912,11 @@ void __init vmalloc_init(void)
if (WARN_ON_ONCE(!va))
continue;
- va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
- va->vm = tmp;
insert_va_to_busy_tree(va);
+
+ __va_set_vm(va, tmp);
}
/*
@@ -1969,8 +2009,9 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
vm->addr = (void *)va->va_start;
vm->size = va->va_end - va->va_start;
vm->caller = caller;
- va->vm = vm;
- va->flags |= VM_VM_AREA;
+
+ __va_set_vm(va, vm);
+
spin_unlock(&vmap_area_lock);
}
@@ -2075,7 +2116,7 @@ struct vm_struct *find_vm_area(const void *addr)
struct vmap_area *va;
va = find_vmap_area((unsigned long)addr);
- if (va && va->flags & VM_VM_AREA)
+ if (va && va_vm_is_valid(va))
return va->vm;
return NULL;
@@ -2098,13 +2139,15 @@ struct vm_struct *remove_vm_area(const void *addr)
might_sleep();
va = find_vmap_area((unsigned long)addr);
- if (va && va->flags & VM_VM_AREA) {
+ if (va && va_vm_is_valid(va)) {
struct vm_struct *vm = va->vm;
spin_lock(&vmap_area_lock);
- va->vm = NULL;
- va->flags &= ~VM_VM_AREA;
- va->flags |= VM_LAZY_FREE;
+ /*
+ * Call __va_unset_vm() to restore the value of va->va_start
+ * before calling free_unmap_vmap_area() to add it to the purge list
+ */
+ __va_unset_vm(va);
spin_unlock(&vmap_area_lock);
kasan_free_shadow(vm);
@@ -2813,7 +2856,7 @@ long vread(char *buf, char *addr, unsigned long count)
if (!count)
break;
- if (!(va->flags & VM_VM_AREA))
+ if (!va_vm_is_valid(va))
continue;
vm = va->vm;
@@ -2893,7 +2936,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
if (!count)
break;
- if (!(va->flags & VM_VM_AREA))
+ if (!va_vm_is_valid(va))
continue;
vm = va->vm;
@@ -3407,14 +3450,16 @@ static int s_show(struct seq_file *m, void *p)
va = list_entry(p, struct vmap_area, list);
/*
- * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
+ * s_show can encounter race with remove_vm_area, !va_vm_is_valid() on
* behalf of vmap area is being tear down or vm_map_ram allocation.
+ * And if va->vm != NULL then the vmap area is being torn down,
+ * otherwise vmap area is allocated by vm_map_ram().
*/
- if (!(va->flags & VM_VM_AREA)) {
+ if (!va_vm_is_valid(va)) {
seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
(void *)va->va_start, (void *)va->va_end,
va->va_end - va->va_start,
- va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+ va->vm ? "unpurged vm_area" : "vm_map_ram");
return 0;
}
--
2.21.0
The red-black tree whose root is free_vmap_area_root is called the
*FREE* tree. Like the previous commit, add a wrapper function
insert_va_to_free_tree() and rename insert_vmap_area_augment to
__insert_vmap_area_augment.
Signed-off-by: Pengfei Li <[email protected]>
---
mm/vmalloc.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0a46be76c63b..a5065fcb74d3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -658,7 +658,7 @@ insert_va_to_busy_tree(struct vmap_area *va)
}
static void
-insert_vmap_area_augment(struct vmap_area *va,
+__insert_vmap_area_augment(struct vmap_area *va,
struct rb_node *from, struct rb_root *root,
struct list_head *head)
{
@@ -674,6 +674,13 @@ insert_vmap_area_augment(struct vmap_area *va,
augment_tree_propagate_from(va);
}
+static __always_inline void
+insert_va_to_free_tree(struct vmap_area *va, struct rb_node *from)
+{
+ __insert_vmap_area_augment(va, from, &free_vmap_area_root,
+ &free_vmap_area_list);
+}
+
/*
* Merge de-allocated chunk of VA memory with previous
* and next free blocks. If coalesce is not done a new
@@ -979,8 +986,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
augment_tree_propagate_from(va);
if (lva) /* type == NE_FIT_TYPE */
- insert_vmap_area_augment(lva, &va->rb_node,
- &free_vmap_area_root, &free_vmap_area_list);
+ insert_va_to_free_tree(lva, &va->rb_node);
}
return 0;
@@ -1822,9 +1828,7 @@ static void vmap_init_free_space(void)
free->va_start = vmap_start;
free->va_end = busy->va_start;
- insert_vmap_area_augment(free, NULL,
- &free_vmap_area_root,
- &free_vmap_area_list);
+ insert_va_to_free_tree(free, NULL);
}
}
@@ -1837,9 +1841,7 @@ static void vmap_init_free_space(void)
free->va_start = vmap_start;
free->va_end = vmap_end;
- insert_vmap_area_augment(free, NULL,
- &free_vmap_area_root,
- &free_vmap_area_list);
+ insert_va_to_free_tree(free, NULL);
}
}
}
--
2.21.0
The red-black tree whose root is vmap_area_root is called the
*BUSY* tree. Since function insert_vmap_area() is only used to
add a vmap area to the *BUSY* tree, add a wrapper function
insert_va_to_busy_tree() for readability.
Besides, rename insert_vmap_area to __insert_vmap_area to indicate
that it should not be called directly.
Signed-off-by: Pengfei Li <[email protected]>
---
mm/vmalloc.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f76cca32a1c..0a46be76c63b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -641,7 +641,7 @@ augment_tree_propagate_from(struct vmap_area *va)
}
static void
-insert_vmap_area(struct vmap_area *va,
+__insert_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
struct rb_node **link;
@@ -651,6 +651,12 @@ insert_vmap_area(struct vmap_area *va,
link_va(va, root, parent, link, head);
}
+static __always_inline void
+insert_va_to_busy_tree(struct vmap_area *va)
+{
+ __insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+}
+
static void
insert_vmap_area_augment(struct vmap_area *va,
struct rb_node *from, struct rb_root *root,
@@ -1070,7 +1076,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
va->va_start = addr;
va->va_end = addr + size;
va->flags = 0;
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+ insert_va_to_busy_tree(va);
spin_unlock(&vmap_area_lock);
@@ -1871,7 +1877,7 @@ void __init vmalloc_init(void)
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
va->vm = tmp;
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+ insert_va_to_busy_tree(va);
}
/*
@@ -3281,7 +3287,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
va->va_start = start;
va->va_end = start + size;
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+ insert_va_to_busy_tree(va);
}
spin_unlock(&vmap_area_lock);
--
2.21.0
On Sun 30-06-19 15:56:45, Pengfei Li wrote:
> Hi,
>
> This series of patches is to reduce the size of struct vmap_area.
>
> Since the members of struct vmap_area are not being used at the same time,
> it is possible to reduce its size by placing several members that are not
> used at the same time in a union.
>
> The first 4 patches did some preparatory work for this and improved
> readability.
>
> The fifth patch is the main patch, it did the work of rewriting vmap_area.
>
> More details can be obtained from the commit message.
None of the commit messages talk about the motivation. Why do we want to
add quite some code to achieve this? How much do we save? This all
should be a part of the cover letter.
> Thanks,
>
> Pengfei
>
> Pengfei Li (5):
> mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> mm/vmalloc.c: Introduce a wrapper function of
> insert_vmap_area_augment()
> mm/vmalloc.c: Rename function __find_vmap_area() for readability
> mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
>
> include/linux/vmalloc.h | 28 +++++---
> mm/vmalloc.c | 144 +++++++++++++++++++++++++++-------------
> 2 files changed, 117 insertions(+), 55 deletions(-)
>
> --
> 2.21.0
--
Michal Hocko
SUSE Labs
On Mon, Jul 01, 2019 at 11:20:37AM +0200, Michal Hocko wrote:
> On Sun 30-06-19 15:56:45, Pengfei Li wrote:
> > Hi,
> >
> > This series of patches is to reduce the size of struct vmap_area.
> >
> > Since the members of struct vmap_area are not being used at the same time,
> > it is possible to reduce its size by placing several members that are not
> > used at the same time in a union.
> >
> > The first 4 patches did some preparatory work for this and improved
> > readability.
> >
> > The fifth patch is the main patch, it did the work of rewriting vmap_area.
> >
> > More details can be obtained from the commit message.
>
> None of the commit messages talk about the motivation. Why do we want to
> add quite some code to achieve this? How much do we save? This all
> should be a part of the cover letter.
>
> > Thanks,
> >
> > Pengfei
> >
> > Pengfei Li (5):
> > mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> > mm/vmalloc.c: Introduce a wrapper function of
> > insert_vmap_area_augment()
> > mm/vmalloc.c: Rename function __find_vmap_area() for readability
> > mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> > mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
> >
> > include/linux/vmalloc.h | 28 +++++---
> > mm/vmalloc.c | 144 +++++++++++++++++++++++++++-------------
> > 2 files changed, 117 insertions(+), 55 deletions(-)
> >
> > --
> > 2.21.0
> > Pengfei Li (5):
> > mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> > mm/vmalloc.c: Introduce a wrapper function of
> > insert_vmap_area_augment()
> > mm/vmalloc.c: Rename function __find_vmap_area() for readability
> > mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> > mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
Fitting vmap_area to 1 cacheline boundary makes sense to me. I was thinking about
that and i have patches in my pipeline to send out but implementation is different.
I had a look at all 5 patches. What you are doing is reasonable to me, i mean when
it comes to the idea of reducing the size to L1 cache line.
I have a concern about implementation and all logic around when we can use va_start
and when it is something else. It is not optimal at least to me, from performance point
of view and complexity. All hot paths and tree traversal are affected by that.
For example running the vmalloc test driver against this series shows the following
delta:
<5.2.0-rc6+>
Summary: fix_size_alloc_test passed: loops: 1000000 avg: 969370 usec
Summary: full_fit_alloc_test passed: loops: 1000000 avg: 989619 usec
Summary: long_busy_list_alloc_test loops: 1000000 avg: 12895813 usec
<5.2.0-rc6+>
<this series>
Summary: fix_size_alloc_test passed: loops: 1000000 avg: 1098372 usec
Summary: full_fit_alloc_test passed: loops: 1000000 avg: 1167260 usec
Summary: long_busy_list_alloc_test passed: loops: 1000000 avg: 12934286 usec
<this series>
For example, the degrade in second test is ~15%.
--
Vlad Rezki
Michal Hocko <[email protected]> 于2019年7月1日周一 下午5:20写道:
>
> On Sun 30-06-19 15:56:45, Pengfei Li wrote:
> > Hi,
> >
> > This series of patches is to reduce the size of struct vmap_area.
> >
> > Since the members of struct vmap_area are not being used at the same time,
> > it is possible to reduce its size by placing several members that are not
> > used at the same time in a union.
> >
> > The first 4 patches did some preparatory work for this and improved
> > readability.
> >
> > The fifth patch is the main patch, it did the work of rewriting vmap_area.
> >
> > More details can be obtained from the commit message.
>
> None of the commit messages talk about the motivation. Why do we want to
> add quite some code to achieve this? How much do we save? This all
> should be a part of the cover letter.
>
Hi Michal,
Thank you for your comments.
Sorry for the commit without any test data.
I will add motivation and necessary test data in the next version.
Best regards,
Pengfei
> > Thanks,
> >
> > Pengfei
> >
> > Pengfei Li (5):
> > mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> > mm/vmalloc.c: Introduce a wrapper function of
> > insert_vmap_area_augment()
> > mm/vmalloc.c: Rename function __find_vmap_area() for readability
> > mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> > mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
> >
> > include/linux/vmalloc.h | 28 +++++---
> > mm/vmalloc.c | 144 +++++++++++++++++++++++++++-------------
> > 2 files changed, 117 insertions(+), 55 deletions(-)
> >
> > --
> > 2.21.0
>
> --
> Michal Hocko
> SUSE Labs
Uladzislau Rezki <[email protected]> 于2019年7月1日周一 下午6:11写道:
>
> On Mon, Jul 01, 2019 at 11:20:37AM +0200, Michal Hocko wrote:
> > On Sun 30-06-19 15:56:45, Pengfei Li wrote:
> > > Hi,
> > >
> > > This series of patches is to reduce the size of struct vmap_area.
> > >
> > > Since the members of struct vmap_area are not being used at the same time,
> > > it is possible to reduce its size by placing several members that are not
> > > used at the same time in a union.
> > >
> > > The first 4 patches did some preparatory work for this and improved
> > > readability.
> > >
> > > The fifth patch is the main patch, it did the work of rewriting vmap_area.
> > >
> > > More details can be obtained from the commit message.
> >
> > None of the commit messages talk about the motivation. Why do we want to
> > add quite some code to achieve this? How much do we save? This all
> > should be a part of the cover letter.
> >
> > > Thanks,
> > >
> > > Pengfei
> > >
> > > Pengfei Li (5):
> > > mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> > > mm/vmalloc.c: Introduce a wrapper function of
> > > insert_vmap_area_augment()
> > > mm/vmalloc.c: Rename function __find_vmap_area() for readability
> > > mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> > > mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
> > >
> > > include/linux/vmalloc.h | 28 +++++---
> > > mm/vmalloc.c | 144 +++++++++++++++++++++++++++-------------
> > > 2 files changed, 117 insertions(+), 55 deletions(-)
> > >
> > > --
> > > 2.21.0
>
> > > Pengfei Li (5):
> > > mm/vmalloc.c: Introduce a wrapper function of insert_vmap_area()
> > > mm/vmalloc.c: Introduce a wrapper function of
> > > insert_vmap_area_augment()
> > > mm/vmalloc.c: Rename function __find_vmap_area() for readability
> > > mm/vmalloc.c: Modify function merge_or_add_vmap_area() for readability
> > > mm/vmalloc.c: Rewrite struct vmap_area to reduce its size
> Fitting vmap_area to 1 cacheline boundary makes sense to me. I was thinking about
> that and i have patches in my pipeline to send out but implementation is different.
>
> I had a look at all 5 patches. What you are doing is reasonable to me, i mean when
> it comes to the idea of reducing the size to L1 cache line.
>
Thank you for your review.
> I have a concern about implementation and all logic around when we can use va_start
> and when it is something else. It is not optimal at least to me, from performance point
> of view and complexity. All hot paths and tree traversal are affected by that.
>
> For example running the vmalloc test driver against this series shows the following
> delta:
>
> <5.2.0-rc6+>
> Summary: fix_size_alloc_test passed: loops: 1000000 avg: 969370 usec
> Summary: full_fit_alloc_test passed: loops: 1000000 avg: 989619 usec
> Summary: long_busy_list_alloc_test loops: 1000000 avg: 12895813 usec
> <5.2.0-rc6+>
>
> <this series>
> Summary: fix_size_alloc_test passed: loops: 1000000 avg: 1098372 usec
> Summary: full_fit_alloc_test passed: loops: 1000000 avg: 1167260 usec
> Summary: long_busy_list_alloc_test passed: loops: 1000000 avg: 12934286 usec
> <this series>
>
> For example, the degrade in second test is ~15%.
>
> --
> Vlad Rezki
Hi, Vlad
I think the reason for the performance degradation is that the value
of va_start is obtained from va->vm->addr.
And since the vmap area in the BUSY tree is always page-aligned,
there is no reason for @_vm_valid to override va_start; just let
va->flags use the bits lower than PAGE_OFFSET.
I will use this implementation in the next version, which showed almost
no performance penalty in my local tests.
I will send the next version soon.
Thank you for taking your time for the review.
Best regards,
Pengfei
On Sun, Jun 30, 2019 at 03:56:48PM +0800, Pengfei Li wrote:
> Rename function __find_vmap_area to __search_va_from_busy_tree to
> indicate that it is searching in the *BUSY* tree.
Wrong preposition; you search _in_ a tree, not _from_ a tree.
On Tue, Jul 2, 2019 at 8:23 PM Matthew Wilcox <[email protected]> wrote:
>
> On Sun, Jun 30, 2019 at 03:56:48PM +0800, Pengfei Li wrote:
> > Rename function __find_vmap_area to __search_va_from_busy_tree to
> > indicate that it is searching in the *BUSY* tree.
>
> Wrong preposition; you search _in_ a tree, not _from_ a tree.
Thanks for your review, I will correct it in the next version.