Following are some cleanups for hugetlb.
A simple test with tools/testing/selftests/vm/map_hugetlb passes.
v4:
* fix a logic error in patch 7, thanks Mike
v3:
* rebase on v5.9-rc2, which adjusts the last patch a little
v2:
* drop patches 5/6/10 since similar patches are merged or under review.
* adjust patch 2 based on comments from Mike Kravetz
Wei Yang (7):
mm/hugetlb: not necessary to coalesce regions recursively
mm/hugetlb: remove VM_BUG_ON(!nrg) in
get_file_region_entry_from_cache()
mm/hugetlb: use list_splice to merge two list at once
mm/hugetlb: count file_region to be added when regions_needed != NULL
mm/hugetlb: a page from buddy is not on any list
mm/hugetlb: narrow the hugetlb_lock protection area during preparing
huge page
mm/hugetlb: take the free hpage during the iteration directly
mm/hugetlb.c | 73 ++++++++++++++++++++++------------------------------
1 file changed, 31 insertions(+), 42 deletions(-)
--
2.20.1 (Apple Git-117)
set_hugetlb_cgroup_[rsvd] just manipulates page-local data, which does
not need to be protected by hugetlb_lock.
Let's take it out of the locked region.
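For reference, a simplified sketch of the underlying helper (paraphrased
from include/linux/hugetlb_cgroup.h around v5.9; the exact tail-page
indices are from memory, not verbatim) shows why no lock is needed:

    static inline void __set_hugetlb_cgroup(struct page *page,
                                            struct hugetlb_cgroup *h_cg,
                                            bool rsvd)
    {
            VM_BUG_ON_PAGE(!PageHuge(page), page);
            if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                    return;
            /*
             * The cgroup pointer is stored in the private field of a
             * tail page of this compound page -- purely page-local
             * state, not any hstate-wide counter that hugetlb_lock
             * guards.
             */
            if (rsvd)
                    page[3].private = (unsigned long)h_cg;
            else
                    page[2].private = (unsigned long)h_cg;
    }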
Signed-off-by: Wei Yang <[email protected]>
Reviewed-by: Baoquan He <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c9b292e664c4..7b3357c1dcec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1493,9 +1493,9 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
- spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, NULL);
set_hugetlb_cgroup_rsvd(page, NULL);
+ spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
--
2.20.1 (Apple Git-117)
We are sure to get a valid file_region here, otherwise the
VM_BUG_ON(resv->region_cache_count <= 0) at the very beginning of the
function would already have been triggered.
Let's remove the redundant check.
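For the record, list_first_entry() is pure pointer arithmetic and can
never evaluate to NULL; from include/linux/list.h:

    #define list_entry(ptr, type, member) \
            container_of(ptr, type, member)

    #define list_first_entry(ptr, type, member) \
            list_entry((ptr)->next, type, member)

The only failure mode is an empty cache, and that is exactly what the
VM_BUG_ON above already catches.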
Signed-off-by: Wei Yang <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index db6af2654f12..fbaf49bc1d26 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -240,7 +240,6 @@ get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
resv->region_cache_count--;
nrg = list_first_entry(&resv->region_cache, struct file_region, link);
- VM_BUG_ON(!nrg);
list_del(&nrg->link);
nrg->from = from;
--
2.20.1 (Apple Git-117)
Instead of adding the allocated file_regions to region_cache one by one,
we can use list_splice to merge the two lists at once.
Also, since we know the number of entries in the list, increase the
counter directly.
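Condensed from allocate_file_region_entries() (declarations simplified
and error handling elided), the pattern after this patch is:

    struct file_region *trg;
    LIST_HEAD(allocated_regions);
    int i;

    /* Build up a private list of file_regions outside resv->lock. */
    for (i = 0; i < to_allocate; i++) {
            trg = kmalloc(sizeof(*trg), GFP_KERNEL);
            if (!trg)
                    goto out_of_memory;
            list_add(&trg->link, &allocated_regions);
    }

    /* Move every entry onto the cache in one step. */
    spin_lock(&resv->lock);
    list_splice(&allocated_regions, &resv->region_cache);
    resv->region_cache_count += to_allocate;

Note that plain list_splice() leaves the source list in an undefined
state; that is fine here because allocated_regions is a local about to
go out of scope, while list_splice_init() would be needed if the list
were reused.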
Signed-off-by: Wei Yang <[email protected]>
Reviewed-by: Baoquan He <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fbaf49bc1d26..a02bf430de6f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -443,11 +443,8 @@ static int allocate_file_region_entries(struct resv_map *resv,
spin_lock(&resv->lock);
- list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
- list_del(&rg->link);
- list_add(&rg->link, &resv->region_cache);
- resv->region_cache_count++;
- }
+ list_splice(&allocated_regions, &resv->region_cache);
+ resv->region_cache_count += to_allocate;
}
return 0;
--
2.20.1 (Apple Git-117)
There are only two use cases of add_reservation_in_range():
* count the file_regions to be added and return the number in regions_needed
* do the real list operation without counting
This means it is not necessary to have two parameters to distinguish
these two cases.
Just use regions_needed to separate them.
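After the change the two modes are distinguished purely by the last
argument, as the call sites in region_add() below show:

    /* Counting pass: the list is left untouched. */
    add_reservation_in_range(resv, f, t, NULL, NULL,
                             &actual_regions_needed);

    /* Real pass: file_regions are taken from the cache and inserted. */
    add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);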
Signed-off-by: Wei Yang <[email protected]>
Reviewed-by: Baoquan He <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a02bf430de6f..441b7f7c623e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -321,16 +321,17 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
}
}
-/* Must be called with resv->lock held. Calling this with count_only == true
- * will count the number of pages to be added but will not modify the linked
- * list. If regions_needed != NULL and count_only == true, then regions_needed
- * will indicate the number of file_regions needed in the cache to carry out to
- * add the regions for this range.
+/*
+ * Must be called with resv->lock held.
+ *
+ * Calling this with regions_needed != NULL will count the number of pages
+ * to be added but will not modify the linked list. And regions_needed will
+ * indicate the number of file_regions needed in the cache to carry out to add
+ * the regions for this range.
*/
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
struct hugetlb_cgroup *h_cg,
- struct hstate *h, long *regions_needed,
- bool count_only)
+ struct hstate *h, long *regions_needed)
{
long add = 0;
struct list_head *head = &resv->regions;
@@ -366,14 +367,14 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
*/
if (rg->from > last_accounted_offset) {
add += rg->from - last_accounted_offset;
- if (!count_only) {
+ if (!regions_needed) {
nrg = get_file_region_entry_from_cache(
resv, last_accounted_offset, rg->from);
record_hugetlb_cgroup_uncharge_info(h_cg, h,
resv, nrg);
list_add(&nrg->link, rg->link.prev);
coalesce_file_region(resv, nrg);
- } else if (regions_needed)
+ } else
*regions_needed += 1;
}
@@ -385,13 +386,13 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
*/
if (last_accounted_offset < t) {
add += t - last_accounted_offset;
- if (!count_only) {
+ if (!regions_needed) {
nrg = get_file_region_entry_from_cache(
resv, last_accounted_offset, t);
record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
list_add(&nrg->link, rg->link.prev);
coalesce_file_region(resv, nrg);
- } else if (regions_needed)
+ } else
*regions_needed += 1;
}
@@ -484,8 +485,8 @@ static long region_add(struct resv_map *resv, long f, long t,
retry:
/* Count how many regions are actually needed to execute this add. */
- add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed,
- true);
+ add_reservation_in_range(resv, f, t, NULL, NULL,
+ &actual_regions_needed);
/*
* Check for sufficient descriptors in the cache to accommodate
@@ -513,7 +514,7 @@ static long region_add(struct resv_map *resv, long f, long t,
goto retry;
}
- add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false);
+ add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
resv->adds_in_progress -= in_regions_needed;
@@ -549,9 +550,9 @@ static long region_chg(struct resv_map *resv, long f, long t,
spin_lock(&resv->lock);
- /* Count how many hugepages in this range are NOT respresented. */
+ /* Count how many hugepages in this range are NOT represented. */
chg = add_reservation_in_range(resv, f, t, NULL, NULL,
- out_regions_needed, true);
+ out_regions_needed);
if (*out_regions_needed == 0)
*out_regions_needed = 1;
--
2.20.1 (Apple Git-117)
Per my understanding, we keep the regions ordered and always coalesce
them properly, so to preserve this property it is enough to merge a
region with its immediate neighbours.
Let's simplify this by dropping the recursion.
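The resulting function (reconstructed from the diff below; the
surrounding conditions are paraphrased from the v5.9 source) merges with
the previous region first, then falls through to check the next one:

    static void coalesce_file_region(struct resv_map *resv,
                                     struct file_region *rg)
    {
            struct file_region *nrg = NULL, *prg = NULL;

            prg = list_prev_entry(rg, link);
            if (&prg->link != &resv->regions && prg->to == rg->from &&
                has_same_uncharge_info(prg, rg)) {
                    prg->to = rg->to;

                    list_del(&rg->link);
                    kfree(rg);

                    /* Continue with the merged region instead of recursing. */
                    rg = prg;
            }

            nrg = list_next_entry(rg, link);
            if (&nrg->link != &resv->regions && nrg->from == rg->to &&
                has_same_uncharge_info(nrg, rg)) {
                    nrg->from = rg->from;

                    list_del(&rg->link);
                    kfree(rg);
            }
    }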
Signed-off-by: Wei Yang <[email protected]>
Reviewed-by: Baoquan He <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a301c2d672bf..db6af2654f12 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -309,8 +309,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
list_del(&rg->link);
kfree(rg);
- coalesce_file_region(resv, prg);
- return;
+ rg = prg;
}
nrg = list_next_entry(rg, link);
@@ -320,9 +319,6 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
list_del(&rg->link);
kfree(rg);
-
- coalesce_file_region(resv, nrg);
- return;
}
}
--
2.20.1 (Apple Git-117)