2021-03-29 23:28:15

by Mike Kravetz

Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

Ideally, cma_release could be called from any context. However, that is
not possible because a mutex is used to protect the per-area bitmap.
Change the mutex to an irq safe spinlock.

Signed-off-by: Mike Kravetz <[email protected]>
---
mm/cma.c | 20 +++++++++++---------
mm/cma.h | 2 +-
mm/cma_debug.c | 10 ++++++----
3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index b2393b892d3b..80875fd4487b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,7 +24,6 @@
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
-#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
@@ -83,13 +82,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
unsigned int count)
{
unsigned long bitmap_no, bitmap_count;
+ unsigned long flags;

bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
bitmap_count = cma_bitmap_pages_to_bits(cma, count);

- mutex_lock(&cma->lock);
+ spin_lock_irqsave(&cma->lock, flags);
bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
@@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma)
pfn += pageblock_nr_pages)
init_cma_reserved_pageblock(pfn_to_page(pfn));

- mutex_init(&cma->lock);
+ spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
INIT_HLIST_HEAD(&cma->mem_head);
@@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
unsigned long start = 0;
unsigned long nr_part, nr_total = 0;
unsigned long nbits = cma_bitmap_maxno(cma);
+ unsigned long flags;

- mutex_lock(&cma->lock);
+ spin_lock_irqsave(&cma->lock, flags);
pr_info("number of available pages: ");
for (;;) {
next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
@@ -407,7 +408,7 @@ static void cma_debug_show_areas(struct cma *cma)
start = next_zero_bit + nr_zero;
}
pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -430,6 +431,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+ unsigned long flags;
size_t i;
struct page *page = NULL;
int ret = -ENOMEM;
@@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
goto out;

for (;;) {
- mutex_lock(&cma->lock);
+ spin_lock_irqsave(&cma->lock, flags);
bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
bitmap_maxno, start, bitmap_count, mask,
offset);
if (bitmap_no >= bitmap_maxno) {
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);
break;
}
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -468,7 +470,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
* our exclusive use. If the migration fails we will take the
* lock again and unmark it.
*/
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);

pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
diff --git a/mm/cma.h b/mm/cma.h
index 68ffad4e430d..2c775877eae2 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -15,7 +15,7 @@ struct cma {
unsigned long count;
unsigned long *bitmap;
unsigned int order_per_bit; /* Order of pages represented by one bit */
- struct mutex lock;
+ spinlock_t lock;
#ifdef CONFIG_CMA_DEBUGFS
struct hlist_head mem_head;
spinlock_t mem_head_lock;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index d5bf8aa34fdc..6379cfbfd568 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
{
struct cma *cma = data;
unsigned long used;
+ unsigned long flags;

- mutex_lock(&cma->lock);
+ spin_lock_irqsave(&cma->lock, flags);
/* pages counter is smaller than sizeof(int) */
used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);
*val = (u64)used << cma->order_per_bit;

return 0;
@@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
unsigned long maxchunk = 0;
unsigned long start, end = 0;
unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
+ unsigned long flags;

- mutex_lock(&cma->lock);
+ spin_lock_irqsave(&cma->lock, flags);
for (;;) {
start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
if (start >= bitmap_maxno)
@@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
end = find_next_bit(cma->bitmap, bitmap_maxno, start);
maxchunk = max(end - start, maxchunk);
}
- mutex_unlock(&cma->lock);
+ spin_unlock_irqrestore(&cma->lock, flags);
*val = (u64)maxchunk << cma->order_per_bit;

return 0;
--
2.30.2


2021-03-30 01:18:23

by Roman Gushchin

Subject: Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On Mon, Mar 29, 2021 at 04:23:55PM -0700, Mike Kravetz wrote:
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the mutex to an irq safe spinlock.
>
> Signed-off-by: Mike Kravetz <[email protected]>

Acked-by: Roman Gushchin <[email protected]>

Thanks!

by Song Bao Hua (Barry Song)

Subject: RE: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock



> -----Original Message-----
> From: Mike Kravetz [mailto:[email protected]]
> Sent: Tuesday, March 30, 2021 12:24 PM
> To: [email protected]; [email protected]
> Cc: Roman Gushchin <[email protected]>; Michal Hocko <[email protected]>; Shakeel Butt
> <[email protected]>; Oscar Salvador <[email protected]>; David Hildenbrand
> <[email protected]>; Muchun Song <[email protected]>; David Rientjes
> <[email protected]>; linmiaohe <[email protected]>; Peter Zijlstra
> <[email protected]>; Matthew Wilcox <[email protected]>; HORIGUCHI NAOYA
> <[email protected]>; Aneesh Kumar K . V <[email protected]>;
> Waiman Long <[email protected]>; Peter Xu <[email protected]>; Mina Almasry
> <[email protected]>; Hillf Danton <[email protected]>; Joonsoo Kim
> <[email protected]>; Song Bao Hua (Barry Song)
> <[email protected]>; Will Deacon <[email protected]>; Andrew Morton
> <[email protected]>; Mike Kravetz <[email protected]>
> Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock
>
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the mutex to an irq safe spinlock.
>
> Signed-off-by: Mike Kravetz <[email protected]>

It seems the mutex is only protecting bitmap operations, which are safe
to perform in atomic context.
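
As an illustration (the calls below are taken from the hunks in the patch at the
top of the thread, not new code), the critical sections guarded by cma->lock only
touch the bitmap and never sleep, so a spinlock is a valid replacement for the mutex:

    unsigned long flags;

    /* cma_alloc(): reserve a range while scanning the bitmap */
    spin_lock_irqsave(&cma->lock, flags);
    bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
    spin_unlock_irqrestore(&cma->lock, flags);

    /* cma_clear_bitmap(): mark the range free again on release */
    spin_lock_irqsave(&cma->lock, flags);
    bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
    spin_unlock_irqrestore(&cma->lock, flags);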

Reviewed-by: Barry Song <[email protected]>

2021-03-30 02:22:59

by Mike Kravetz

Subject: Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On 3/29/21 6:20 PM, Song Bao Hua (Barry Song) wrote:
>
>
>> -----Original Message-----
>> From: Mike Kravetz [mailto:[email protected]]
>> Sent: Tuesday, March 30, 2021 12:24 PM
>> To: [email protected]; [email protected]
>> Cc: Roman Gushchin <[email protected]>; Michal Hocko <[email protected]>; Shakeel Butt
>> <[email protected]>; Oscar Salvador <[email protected]>; David Hildenbrand
>> <[email protected]>; Muchun Song <[email protected]>; David Rientjes
>> <[email protected]>; linmiaohe <[email protected]>; Peter Zijlstra
>> <[email protected]>; Matthew Wilcox <[email protected]>; HORIGUCHI NAOYA
>> <[email protected]>; Aneesh Kumar K . V <[email protected]>;
>> Waiman Long <[email protected]>; Peter Xu <[email protected]>; Mina Almasry
>> <[email protected]>; Hillf Danton <[email protected]>; Joonsoo Kim
>> <[email protected]>; Song Bao Hua (Barry Song)
>> <[email protected]>; Will Deacon <[email protected]>; Andrew Morton
>> <[email protected]>; Mike Kravetz <[email protected]>
>> Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock
>>
>> Ideally, cma_release could be called from any context. However, that is
>> not possible because a mutex is used to protect the per-area bitmap.
>> Change the mutex to an irq safe spinlock.
>>
>> Signed-off-by: Mike Kravetz <[email protected]>
>
> It seems the mutex is only protecting bitmap operations, which are safe
> to perform in atomic context.
>
> Reviewed-by: Barry Song <[email protected]>

Thanks Barry,

Not sure if you saw the questions from Michal on the previous series?
There was some concern from Joonsoo in the past about lock hold time due
to bitmap scans. You may have some insight into the typical size of CMA
areas on arm64. I believe the calls to set up the areas specify one bit
per page.
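
For a rough sense of the scan cost (example numbers only, not measurements), the
per-area bitmap walked under the lock has one bit per 2^order_per_bit pages,
mirroring cma_bitmap_maxno() used in the patch:

    /* number of bits scanned in the worst case while the lock is held */
    unsigned long nbits = cma->count >> cma->order_per_bit;

    /*
     * Example: a 16GB area with 4K pages and order_per_bit == 0
     * (one bit per page) gives 4M bits, i.e. a 512KB bitmap to walk.
     */
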
--
Mike Kravetz

2021-03-30 08:03:14

by Michal Hocko

Subject: Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the mutex to an irq safe spinlock.

I would phrase the changelog slightly differently
"
cma_release is currently a sleepable operation because the bitmap
manipulation is protected by cma->lock mutex. Hugetlb code which relies
on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
irq safe.

The lock doesn't protect any sleepable operation so it can be changed to
a (irq aware) spin lock. The bitmap processing should be quite fast in
typical case but if cma sizes grow to TB then we will likely need to
replace the lock by a more optimized bitmap implementation.
"

it seems that you are overusing irqsave variants even from contexts which
are never called from the IRQ context, so they do not need storing flags.
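
For reference, a minimal sketch of the two variants under discussion (not from the
patch): spin_lock_irqsave() saves and restores the caller's interrupt state, so it
is safe even for callers that may already run with IRQs disabled, while
spin_lock_irq() unconditionally disables and re-enables IRQs and is sufficient for
paths only ever entered from process context:

    unsigned long flags;

    /* callable from any context, including with IRQs already disabled */
    spin_lock_irqsave(&cma->lock, flags);
    /* ... bitmap manipulation ... */
    spin_unlock_irqrestore(&cma->lock, flags);

    /* process context only: no flags to save or restore */
    spin_lock_irq(&cma->lock);
    /* ... bitmap manipulation ... */
    spin_unlock_irq(&cma->lock);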

[...]
> @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> unsigned long start = 0;
> unsigned long nr_part, nr_total = 0;
> unsigned long nbits = cma_bitmap_maxno(cma);
> + unsigned long flags;
>
> - mutex_lock(&cma->lock);
> + spin_lock_irqsave(&cma->lock, flags);

spin_lock_irq should be sufficient. This is only called from the
allocation context and that is never called from IRQ context.
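
Roughly what that would look like here (a sketch only, not the final patch):

    /* cma_debug_show_areas() with Michal's suggestion applied */
    spin_lock_irq(&cma->lock);      /* no flags: only reached from process context */
    /* ... walk the bitmap and print the free ranges as above ... */
    spin_unlock_irq(&cma->lock);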

> pr_info("number of available pages: ");
> for (;;) {
> next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
> @@ -407,7 +408,7 @@ static void cma_debug_show_areas(struct cma *cma)
> start = next_zero_bit + nr_zero;
> }
> pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
> - mutex_unlock(&cma->lock);
> + spin_unlock_irqrestore(&cma->lock, flags);
> }
> #else
> static inline void cma_debug_show_areas(struct cma *cma) { }
> @@ -430,6 +431,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> unsigned long pfn = -1;
> unsigned long start = 0;
> unsigned long bitmap_maxno, bitmap_no, bitmap_count;
> + unsigned long flags;
> size_t i;
> struct page *page = NULL;
> int ret = -ENOMEM;
> @@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> goto out;
>
> for (;;) {
> - mutex_lock(&cma->lock);
> + spin_lock_irqsave(&cma->lock, flags);
> bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
> bitmap_maxno, start, bitmap_count, mask,
> offset);
> if (bitmap_no >= bitmap_maxno) {
> - mutex_unlock(&cma->lock);
> + spin_unlock_irqrestore(&cma->lock, flags);
> break;
> }
> bitmap_set(cma->bitmap, bitmap_no, bitmap_count);

same here.

> @@ -468,7 +470,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
> * our exclusive use. If the migration fails we will take the
> * lock again and unmark it.
> */
> - mutex_unlock(&cma->lock);
> + spin_unlock_irqrestore(&cma->lock, flags);
>
> pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
> ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
> diff --git a/mm/cma.h b/mm/cma.h
> index 68ffad4e430d..2c775877eae2 100644
> --- a/mm/cma.h
> +++ b/mm/cma.h
> @@ -15,7 +15,7 @@ struct cma {
> unsigned long count;
> unsigned long *bitmap;
> unsigned int order_per_bit; /* Order of pages represented by one bit */
> - struct mutex lock;
> + spinlock_t lock;
> #ifdef CONFIG_CMA_DEBUGFS
> struct hlist_head mem_head;
> spinlock_t mem_head_lock;
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index d5bf8aa34fdc..6379cfbfd568 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
> {
> struct cma *cma = data;
> unsigned long used;
> + unsigned long flags;
>
> - mutex_lock(&cma->lock);
> + spin_lock_irqsave(&cma->lock, flags);
> /* pages counter is smaller than sizeof(int) */
> used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
> - mutex_unlock(&cma->lock);
> + spin_unlock_irqrestore(&cma->lock, flags);
> *val = (u64)used << cma->order_per_bit;

same here

>
> return 0;
> @@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
> unsigned long maxchunk = 0;
> unsigned long start, end = 0;
> unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
> + unsigned long flags;
>
> - mutex_lock(&cma->lock);
> + spin_lock_irqsave(&cma->lock, flags);
> for (;;) {
> start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
> if (start >= bitmap_maxno)
> @@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
> end = find_next_bit(cma->bitmap, bitmap_maxno, start);
> maxchunk = max(end - start, maxchunk);
> }
> - mutex_unlock(&cma->lock);
> + spin_unlock_irqrestore(&cma->lock, flags);
> *val = (u64)maxchunk << cma->order_per_bit;
>
> return 0;

and here.
--
Michal Hocko
SUSE Labs

2021-03-30 08:11:32

by Muchun Song

Subject: Re: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <[email protected]> wrote:
>
> On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > Ideally, cma_release could be called from any context. However, that is
> > not possible because a mutex is used to protect the per-area bitmap.
> > Change the mutex to an irq safe spinlock.
>
> I would phrase the changelog slightly differently
> "
> cma_release is currently a sleepable operation because the bitmap
> manipulation is protected by cma->lock mutex. Hugetlb code which relies
> on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> irq safe.
>
> The lock doesn't protect any sleepable operation so it can be changed to
> a (irq aware) spin lock. The bitmap processing should be quite fast in
> typical case but if cma sizes grow to TB then we will likely need to
> replace the lock by a more optimized bitmap implementation.
> "
>
> it seems that you are overusing irqsave variants even from contexts which
> are never called from the IRQ context, so they do not need storing flags.
>
> [...]
> > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > unsigned long start = 0;
> > unsigned long nr_part, nr_total = 0;
> > unsigned long nbits = cma_bitmap_maxno(cma);
> > + unsigned long flags;
> >
> > - mutex_lock(&cma->lock);
> > + spin_lock_irqsave(&cma->lock, flags);
>
> spin_lock_irq should be sufficient. This is only called from the
> allocation context and that is never called from IRQ context.

This makes me think more. I think that spin_lock should be
sufficient. Right?


2021-03-30 08:20:46

by Michal Hocko

Subject: Re: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On Tue 30-03-21 16:08:36, Muchun Song wrote:
> On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <[email protected]> wrote:
> >
> > On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > > Ideally, cma_release could be called from any context. However, that is
> > > not possible because a mutex is used to protect the per-area bitmap.
> > > Change the mutex to an irq safe spinlock.
> >
> > I would phrase the changelog slightly differently
> > "
> > cma_release is currently a sleepable operation because the bitmap
> > manipulation is protected by cma->lock mutex. Hugetlb code which relies
> > on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> > irq safe.
> >
> > The lock doesn't protect any sleepable operation so it can be changed to
> > a (irq aware) spin lock. The bitmap processing should be quite fast in
> > typical case but if cma sizes grow to TB then we will likely need to
> > replace the lock by a more optimized bitmap implementation.
> > "
> >
> > it seems that you are overusing irqsave variants even from contexts which
> > are never called from the IRQ context, so they do not need storing flags.
> >
> > [...]
> > > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > > unsigned long start = 0;
> > > unsigned long nr_part, nr_total = 0;
> > > unsigned long nbits = cma_bitmap_maxno(cma);
> > > + unsigned long flags;
> > >
> > > - mutex_lock(&cma->lock);
> > > + spin_lock_irqsave(&cma->lock, flags);
> >
> > spin_lock_irq should be sufficient. This is only called from the
> > allocation context and that is never called from IRQ context.
>
> This makes me think more. I think that spin_lock should be
> sufficient. Right?

Nope. Think of the following scenario:
    spin_lock(cma->lock);
    <IRQ>
      put_page
        __free_huge_page
          cma_release
            spin_lock_irqsave()   DEADLOCK
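
To illustrate (a sketch only, not part of the patch): disabling interrupts while
the lock is held in process context keeps the freeing interrupt from running on
that CPU until the lock is dropped, which is why the irq-aware variants are needed:

    /* process-context path, e.g. the allocation side */
    spin_lock_irq(&cma->lock);          /* IRQs off: the free path cannot interrupt us */
    bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
    spin_unlock_irq(&cma->lock);        /* IRQs on again; a pending free can now run */

    /* path that may run with IRQs already disabled, e.g. cma_release */
    spin_lock_irqsave(&cma->lock, flags);
    bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
    spin_unlock_irqrestore(&cma->lock, flags);
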
--
Michal Hocko
SUSE Labs

by Song Bao Hua (Barry Song)

Subject: RE: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock



> -----Original Message-----
> From: Muchun Song [mailto:[email protected]]
> Sent: Tuesday, March 30, 2021 9:09 PM
> To: Michal Hocko <[email protected]>
> Cc: Mike Kravetz <[email protected]>; Linux Memory Management List
> <[email protected]>; LKML <[email protected]>; Roman Gushchin
> <[email protected]>; Shakeel Butt <[email protected]>; Oscar Salvador
> <[email protected]>; David Hildenbrand <[email protected]>; David Rientjes
> <[email protected]>; linmiaohe <[email protected]>; Peter Zijlstra
> <[email protected]>; Matthew Wilcox <[email protected]>; HORIGUCHI NAOYA
> <[email protected]>; Aneesh Kumar K . V <[email protected]>;
> Waiman Long <[email protected]>; Peter Xu <[email protected]>; Mina Almasry
> <[email protected]>; Hillf Danton <[email protected]>; Joonsoo Kim
> <[email protected]>; Song Bao Hua (Barry Song)
> <[email protected]>; Will Deacon <[email protected]>; Andrew Morton
> <[email protected]>
> Subject: Re: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe
> spinlock
>
> On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <[email protected]> wrote:
> >
> > On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > > Ideally, cma_release could be called from any context. However,
> > > that is not possible because a mutex is used to protect the per-area bitmap.
> > > Change the mutex to an irq safe spinlock.
> >
> > I would phrase the changelog slightly differently "
> > cma_release is currently a sleepable operation because the bitmap
> > manipulation is protected by cma->lock mutex. Hugetlb code which
> > relies on cma_release for CMA backed (giga) hugetlb pages, however,
> > needs to be irq safe.
> >
> > The lock doesn't protect any sleepable operation so it can be changed
> > to a (irq aware) spin lock. The bitmap processing should be quite fast
> > in typical case but if cma sizes grow to TB then we will likely need
> > to replace the lock by a more optimized bitmap implementation.
> > "
> >
> > it seems that you are overusing irqsave variants even from contexts
> > which are never called from the IRQ context, so they do not need storing flags.
> >
> > [...]
> > > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > > unsigned long start = 0;
> > > unsigned long nr_part, nr_total = 0;
> > > unsigned long nbits = cma_bitmap_maxno(cma);
> > > + unsigned long flags;
> > >
> > > - mutex_lock(&cma->lock);
> > > + spin_lock_irqsave(&cma->lock, flags);
> >
> > spin_lock_irq should be sufficient. This is only called from the
> > allocation context and that is never called from IRQ context.
>
> This makes me think more. I think that spin_lock should be sufficient. Right?
>

It seems Mike's point is that cma_release might be called from both
irq context and process context.

If it is running in process context, we need to disable irqs so that an
interrupt cannot jump in and call cma_release on the same area at the
same time.

That said, we have never actually seen cma_release called from irq
context so far.


2021-03-30 08:24:09

by Muchun Song

Subject: Re: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On Tue, Mar 30, 2021 at 4:18 PM Michal Hocko <[email protected]> wrote:
>
> On Tue 30-03-21 16:08:36, Muchun Song wrote:
> > On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <[email protected]> wrote:
> > >
> > > On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > > > Ideally, cma_release could be called from any context. However, that is
> > > > not possible because a mutex is used to protect the per-area bitmap.
> > > > Change the mutex to an irq safe spinlock.
> > >
> > > I would phrase the changelog slightly differently
> > > "
> > > cma_release is currently a sleepable operation because the bitmap
> > > manipulation is protected by cma->lock mutex. Hugetlb code which relies
> > > on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> > > irq safe.
> > >
> > > The lock doesn't protect any sleepable operation so it can be changed to
> > > a (irq aware) spin lock. The bitmap processing should be quite fast in
> > > typical case but if cma sizes grow to TB then we will likely need to
> > > replace the lock by a more optimized bitmap implementation.
> > > "
> > >
> > > it seems that you are overusing irqsave variants even from contexts which
> > > are never called from the IRQ context, so they do not need storing flags.
> > >
> > > [...]
> > > > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > > > unsigned long start = 0;
> > > > unsigned long nr_part, nr_total = 0;
> > > > unsigned long nbits = cma_bitmap_maxno(cma);
> > > > + unsigned long flags;
> > > >
> > > > - mutex_lock(&cma->lock);
> > > > + spin_lock_irqsave(&cma->lock, flags);
> > >
> > > spin_lock_irq should be sufficient. This is only called from the
> > > allocation context and that is never called from IRQ context.
> >
> > This makes me think more. I think that spin_lock should be
> > sufficient. Right?
>
> Nope. Think of the following scenario:
>     spin_lock(cma->lock);
>     <IRQ>
>       put_page
>         __free_huge_page
>           cma_release
>             spin_lock_irqsave()   DEADLOCK

Got it. Thanks.

> --
> Michal Hocko
> SUSE Labs

2021-03-31 02:40:38

by Mike Kravetz

Subject: Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock

On 3/30/21 1:01 AM, Michal Hocko wrote:
> On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
>> Ideally, cma_release could be called from any context. However, that is
>> not possible because a mutex is used to protect the per-area bitmap.
>> Change the mutex to an irq safe spinlock.
>
> I would phrase the changelog slightly differently
> "
> cma_release is currently a sleepable operation because the bitmap
> manipulation is protected by cma->lock mutex. Hugetlb code which relies
> on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> irq safe.
>
> The lock doesn't protect any sleepable operation so it can be changed to
> a (irq aware) spin lock. The bitmap processing should be quite fast in
> typical case but if cma sizes grow to TB then we will likely need to
> replace the lock by a more optimized bitmap implementation.
> "

That is better. Thank you.

>
> it seems that you are overusing irqsave variants even from contexts which
> are never called from the IRQ context, so they do not need storing flags.
>
> [...]

Yes.

>> @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
>> unsigned long start = 0;
>> unsigned long nr_part, nr_total = 0;
>> unsigned long nbits = cma_bitmap_maxno(cma);
>> + unsigned long flags;
>>
>> - mutex_lock(&cma->lock);
>> + spin_lock_irqsave(&cma->lock, flags);
>
> spin_lock_irq should be sufficient. This is only called from the
> allocation context and that is never called from IRQ context.
>

I will change this and those below.

Thanks for your continued reviews and patience.
--
Mike Kravetz