Add a new interface, cma_pages_valid(), which indicates whether the
specified pages are part of a CMA region. This interface will be used
in a subsequent patch by hugetlb code.
Signed-off-by: Mike Kravetz <[email protected]>
---
include/linux/cma.h | 1 +
mm/cma.c | 21 +++++++++++++++++----
2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 53fd8c3cdbd0..bd801023504b 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/mm/cma.c b/mm/cma.c
index 995e15480937..960994b88c7f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -524,6 +524,22 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
return page;
}
+bool cma_pages_valid(struct cma *cma, const struct page *pages,
+ unsigned long count)
+{
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ return true;
+}
+
/**
* cma_release() - release allocated pages
* @cma: Contiguous memory region for which the allocation is performed.
@@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
{
unsigned long pfn;
- if (!cma || !pages)
+ if (!cma_pages_valid(cma, pages, count))
return false;
pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
pfn = page_to_pfn(pages);
- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
- return false;
-
VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
free_contig_range(pfn, count);
--
2.31.1
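For context, the follow-up hugetlb patch is expected to use the new
interface along these lines (a hypothetical sketch based on the
changelog; hugetlb_cma_page() and the per-node hugetlb_cma[] array are
illustrative here, not part of this patch):

	/*
	 * Sketch: let hugetlb decide whether a gigantic page sits in one
	 * of its per-node CMA areas without having to attempt
	 * cma_release() just to learn the answer.
	 */
	static inline bool hugetlb_cma_page(struct page *page, unsigned int order)
	{
		return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
				       1 << order);
	}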
On Fri, Oct 01, 2021 at 10:52:07AM -0700, Mike Kravetz wrote:
> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
> + unsigned long count)
> +{
> + unsigned long pfn;
> +
> + if (!cma || !pages)
> + return false;
> +
> + pfn = page_to_pfn(pages);
> +
> + if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> + return false;
> +
> + return true;
> +}
> +
> /**
> * cma_release() - release allocated pages
> * @cma: Contiguous memory region for which the allocation is performed.
> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
> {
> unsigned long pfn;
>
> - if (!cma || !pages)
> + if (!cma_pages_valid(cma, pages, count))
> return false;
>
> pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>
> pfn = page_to_pfn(pages);
>
> - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> - return false;
> -
After this patch, the timing of printing the debug statement changes as we back
off earlier.
You might want to point that out in the changelog in case someone wonders why.
--
Oscar Salvador
SUSE Labs
On 01.10.21 19:52, Mike Kravetz wrote:
> Add new interface cma_pages_valid() which indicates if the specified
> pages are part of a CMA region. This interface will be used in a
> subsequent patch by hugetlb code.
>
> Signed-off-by: Mike Kravetz <[email protected]>
> ---
> include/linux/cma.h | 1 +
> mm/cma.c | 21 +++++++++++++++++----
> 2 files changed, 18 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index 53fd8c3cdbd0..bd801023504b 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
> struct cma **res_cma);
> extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
> bool no_warn);
> +extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
> extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
>
> extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
> diff --git a/mm/cma.c b/mm/cma.c
> index 995e15480937..960994b88c7f 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -524,6 +524,22 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
> return page;
> }
>
> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
> + unsigned long count)
> +{
> + unsigned long pfn;
> +
> + if (!cma || !pages)
> + return false;
> +
> + pfn = page_to_pfn(pages);
> +
> + if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> + return false;
> +
> + return true;
> +}
> +
> /**
> * cma_release() - release allocated pages
> * @cma: Contiguous memory region for which the allocation is performed.
> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
> {
> unsigned long pfn;
>
> - if (!cma || !pages)
> + if (!cma_pages_valid(cma, pages, count))
> return false;
>
> pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>
> pfn = page_to_pfn(pages);
>
> - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
> - return false;
> -
> VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
>
> free_contig_range(pfn, count);
>
Agreed that we might want to perform the pr_debug() earlier now, or add
another pr_warn before returning false.
Acked-by: David Hildenbrand <[email protected]>
--
Thanks,
David / dhildenb
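The pr_warn alternative could take a shape like the following inside
cma_pages_valid(), replacing the bare range check (a sketch only; the
message text is illustrative):

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		/* Complain loudly before backing off, per the review. */
		pr_warn("%s: page %p is not in CMA region %s\n",
			__func__, (void *)pages, cma->name);
		return false;
	}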
On 10/5/21 1:45 AM, Oscar Salvador wrote:
> On Fri, Oct 01, 2021 at 10:52:07AM -0700, Mike Kravetz wrote:
>> +bool cma_pages_valid(struct cma *cma, const struct page *pages,
>> + unsigned long count)
>> +{
>> + unsigned long pfn;
>> +
>> + if (!cma || !pages)
>> + return false;
>> +
>> + pfn = page_to_pfn(pages);
>> +
>> + if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
>> + return false;
>> +
>> + return true;
>> +}
>> +
>> /**
>> * cma_release() - release allocated pages
>> * @cma: Contiguous memory region for which the allocation is performed.
>> @@ -539,16 +555,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
>> {
>> unsigned long pfn;
>>
>> - if (!cma || !pages)
>> + if (!cma_pages_valid(cma, pages, count))
>> return false;
>>
>> pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
>>
>> pfn = page_to_pfn(pages);
>>
>> - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
>> - return false;
>> -
>
> After this patch, the timing of printing the debug statement changes as we back
> off earlier.
> You might want to point that out in the changelog in case someone wonders why.
>
When making this change, I did not want to duplicate that debug
statement. However, the duplicated code only comes into play if
CONFIG_CMA_DEBUG is enabled, so the duplication should be acceptable.
I will make the debug statements function as before.
--
Mike Kravetz
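For reference, a version of cma_pages_valid() that keeps the debug
output for the failing case might look like this (a sketch of the
stated plan, not the posted patch):

	bool cma_pages_valid(struct cma *cma, const struct page *pages,
			     unsigned long count)
	{
		unsigned long pfn;

		if (!cma || !pages)
			return false;

		pfn = page_to_pfn(pages);

		if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
			/* Fire the same debug line cma_release() used to print. */
			pr_debug("%s(page %p, count %lu)\n", __func__,
				 (void *)pages, count);
			return false;
		}

		return true;
	}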