2014-11-05 20:07:53

by Gregory Fong

[permalink] [raw]
Subject: [PATCH 1/2] lib: bitmap: Added alignment offset for bitmap_find_next_zero_area()

From: Michal Nazarewicz <[email protected]>

This commit adds a bitmap_find_next_zero_area_off() function which
works like the bitmap_find_next_zero_area() function except that it
allows an offset to be specified when alignment is checked. This lets
the caller request a bit such that its number plus the offset is
aligned according to the mask.

Signed-off-by: Michal Nazarewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
Signed-off-by: Marek Szyprowski <[email protected]>
CC: Michal Nazarewicz <[email protected]>
[[email protected]: Retrieved from
https://patchwork.linuxtv.org/patch/6254/ and updated documentation]
Signed-off-by: Gregory Fong <[email protected]>
---
include/linux/bitmap.h | 36 +++++++++++++++++++++++++++++++-----
lib/bitmap.c | 24 +++++++++++++-----------
2 files changed, 44 insertions(+), 16 deletions(-)

diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d08..34e020c 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -114,11 +115,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);

extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
-extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask);
+
+extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+static inline unsigned long
+bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, 0);
+}

extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b499ab6..969ae8f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -326,30 +326,32 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
}
EXPORT_SYMBOL(bitmap_clear);

-/*
- * bitmap_find_next_zero_area - find a contiguous aligned zero area
+/**
+ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is multiple of that power of 2.
*/
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);

/* Align allocation */
- index = __ALIGN_MASK(index, align_mask);
+ index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;

end = index + nr;
if (end > size)
@@ -361,7 +363,7 @@ again:
}
return index;
}
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);

/*
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
--
1.9.1


2014-11-05 20:08:06

by Gregory Fong

[permalink] [raw]
Subject: [PATCH 2/2] mm: cma: Align to physical address, not CMA region position

The alignment in cma_alloc() was done w.r.t. the bitmap. This is a
problem when, for example:

- a device requires 16M (order 12) alignment
- the CMA region is not 16 M aligned

In such a case, you can end up with the CMA region starting at, say,
0x2f800000, but any allocation you make from there will be aligned from
there. Requesting an allocation of 32 M with 16 M alignment will
result in an allocation from 0x2f800000 to 0x31800000, which doesn't
work very well if your strange device requires 16 M alignment.

Change to use bitmap_find_next_zero_area_off() to account for the
difference in alignment at reserve-time and alloc-time.

Cc: Michal Nazarewicz <[email protected]>
Signed-off-by: Gregory Fong <[email protected]>
---
mm/cma.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index fde706e..0813599 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -63,6 +63,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
return (1UL << (align_order - cma->order_per_bit)) - 1;
}

+static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+{
+ unsigned int alignment;
+
+ if (align_order <= cma->order_per_bit)
+ return 0;
+ alignment = 1UL << (align_order - cma->order_per_bit);
+ return ALIGN(cma->base_pfn, alignment) -
+ (cma->base_pfn >> cma->order_per_bit);
+}
+
static unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
@@ -328,7 +339,7 @@ err:
*/
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
- unsigned long mask, pfn, start = 0;
+ unsigned long mask, offset, pfn, start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
@@ -343,13 +354,15 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
return NULL;

mask = cma_bitmap_aligned_mask(cma, align);
+ offset = cma_bitmap_aligned_offset(cma, align);
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);

for (;;) {
mutex_lock(&cma->lock);
- bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
- bitmap_maxno, start, bitmap_count, mask);
+ bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
+ bitmap_maxno, start, bitmap_count, mask,
+ offset);
if (bitmap_no >= bitmap_maxno) {
mutex_unlock(&cma->lock);
break;
--
1.9.1

2014-11-05 21:57:45

by Michal Nazarewicz

[permalink] [raw]
Subject: Re: [PATCH 1/2] lib: bitmap: Added alignment offset for bitmap_find_next_zero_area()

On Wed, Nov 05 2014, Gregory Fong wrote:
> From: Michal Nazarewicz <[email protected]>

Please change to [email protected]. My Samsung address is no longer
valid. Ditto on signed-off-by line.

>
> This commit adds a bitmap_find_next_zero_area_off() function which
> works like the bitmap_find_next_zero_area() function except that it
> allows an offset to be specified when alignment is checked. This lets
> the caller request a bit such that its number plus the offset is
> aligned according to the mask.
>
> Signed-off-by: Michal Nazarewicz <[email protected]>
> Signed-off-by: Kyungmin Park <[email protected]>
> Signed-off-by: Marek Szyprowski <[email protected]>
> CC: Michal Nazarewicz <[email protected]>
> [[email protected]: Retrieved from
> https://patchwork.linuxtv.org/patch/6254/ and updated documentation]
> Signed-off-by: Gregory Fong <[email protected]>
> ---
> include/linux/bitmap.h | 36 +++++++++++++++++++++++++++++++-----
> lib/bitmap.c | 24 +++++++++++++-----------
> 2 files changed, 44 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
> index e1c8d08..34e020c 100644
> --- a/include/linux/bitmap.h
> +++ b/include/linux/bitmap.h
> @@ -45,6 +45,7 @@
> * bitmap_set(dst, pos, nbits) Set specified bit area
> * bitmap_clear(dst, pos, nbits) Clear specified bit area
> * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
> + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
> * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
> * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
> * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
> @@ -114,11 +115,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
>
> extern void bitmap_set(unsigned long *map, unsigned int start, int len);
> extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
> -extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
> - unsigned long size,
> - unsigned long start,
> - unsigned int nr,
> - unsigned long align_mask);
> +
> +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
> + unsigned long size,
> + unsigned long start,
> + unsigned int nr,
> + unsigned long align_mask,
> + unsigned long align_offset);
> +
> +/**
> + * bitmap_find_next_zero_area - find a contiguous aligned zero area
> + * @map: The address to base the search on
> + * @size: The bitmap size in bits
> + * @start: The bitnumber to start searching at
> + * @nr: The number of zeroed bits we're looking for
> + * @align_mask: Alignment mask for zero area
> + *
> + * The @align_mask should be one less than a power of 2; the effect is that
> + * the bit offset of all zero areas this function finds is multiples of that
> + * power of 2. A @align_mask of 0 means no alignment is required.
> + */
> +static inline unsigned long
> +bitmap_find_next_zero_area(unsigned long *map,
> + unsigned long size,
> + unsigned long start,
> + unsigned int nr,
> + unsigned long align_mask)
> +{
> + return bitmap_find_next_zero_area_off(map, size, start, nr,
> + align_mask, 0);
> +}
>
> extern int bitmap_scnprintf(char *buf, unsigned int len,
> const unsigned long *src, int nbits);
> diff --git a/lib/bitmap.c b/lib/bitmap.c
> index b499ab6..969ae8f 100644
> --- a/lib/bitmap.c
> +++ b/lib/bitmap.c
> @@ -326,30 +326,32 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
> }
> EXPORT_SYMBOL(bitmap_clear);
>
> -/*
> - * bitmap_find_next_zero_area - find a contiguous aligned zero area
> +/**
> + * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
> * @map: The address to base the search on
> * @size: The bitmap size in bits
> * @start: The bitnumber to start searching at
> * @nr: The number of zeroed bits we're looking for
> * @align_mask: Alignment mask for zero area
> + * @align_offset: Alignment offset for zero area.
> *
> * The @align_mask should be one less than a power of 2; the effect is that
> - * the bit offset of all zero areas this function finds is multiples of that
> - * power of 2. A @align_mask of 0 means no alignment is required.
> + * the bit offset of all zero areas this function finds plus @align_offset
> + * is multiple of that power of 2.
> */
> -unsigned long bitmap_find_next_zero_area(unsigned long *map,
> - unsigned long size,
> - unsigned long start,
> - unsigned int nr,
> - unsigned long align_mask)
> +unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
> + unsigned long size,
> + unsigned long start,
> + unsigned int nr,
> + unsigned long align_mask,
> + unsigned long align_offset)
> {
> unsigned long index, end, i;
> again:
> index = find_next_zero_bit(map, size, start);
>
> /* Align allocation */
> - index = __ALIGN_MASK(index, align_mask);
> + index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
>
> end = index + nr;
> if (end > size)
> @@ -361,7 +363,7 @@ again:
> }
> return index;
> }
> -EXPORT_SYMBOL(bitmap_find_next_zero_area);
> +EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
>
> /*
> * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
> --
> 1.9.1
>

--
Best regards, _ _
.o. | Liege of Serenely Enlightened Majesty of o' \,=./ `o
..o | Computer Science, Michał “mina86” Nazarewicz (o o)
ooo +--<[email protected]>--<xmpp:[email protected]>--ooO--(_)--Ooo--

2014-11-05 21:58:47

by Michal Nazarewicz

[permalink] [raw]
Subject: Re: [PATCH 2/2] mm: cma: Align to physical address, not CMA region position

On Wed, Nov 05 2014, Gregory Fong wrote:
> The alignment in cma_alloc() was done w.r.t. the bitmap. This is a
> problem when, for example:
>
> - a device requires 16M (order 12) alignment
> - the CMA region is not 16 M aligned
>
> In such a case, you can end up with the CMA region starting at, say,
> 0x2f800000, but any allocation you make from there will be aligned from
> there. Requesting an allocation of 32 M with 16 M alignment will
> result in an allocation from 0x2f800000 to 0x31800000, which doesn't
> work very well if your strange device requires 16 M alignment.
>
> Change to use bitmap_find_next_zero_area_off() to account for the
> difference in alignment at reserve-time and alloc-time.
>
> Cc: Michal Nazarewicz <[email protected]>

Acked-by: Michal Nazarewicz <[email protected]>

> Signed-off-by: Gregory Fong <[email protected]>
> ---
> mm/cma.c | 19 ++++++++++++++++---
> 1 file changed, 16 insertions(+), 3 deletions(-)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index fde706e..0813599 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -63,6 +63,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
> return (1UL << (align_order - cma->order_per_bit)) - 1;
> }
>
> +static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
> +{
> + unsigned int alignment;
> +
> + if (align_order <= cma->order_per_bit)
> + return 0;
> + alignment = 1UL << (align_order - cma->order_per_bit);
> + return ALIGN(cma->base_pfn, alignment) -
> + (cma->base_pfn >> cma->order_per_bit);
> +}
> +
> static unsigned long cma_bitmap_maxno(struct cma *cma)
> {
> return cma->count >> cma->order_per_bit;
> @@ -328,7 +339,7 @@ err:
> */
> struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
> {
> - unsigned long mask, pfn, start = 0;
> + unsigned long mask, offset, pfn, start = 0;
> unsigned long bitmap_maxno, bitmap_no, bitmap_count;
> struct page *page = NULL;
> int ret;
> @@ -343,13 +354,15 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
> return NULL;
>
> mask = cma_bitmap_aligned_mask(cma, align);
> + offset = cma_bitmap_aligned_offset(cma, align);
> bitmap_maxno = cma_bitmap_maxno(cma);
> bitmap_count = cma_bitmap_pages_to_bits(cma, count);
>
> for (;;) {
> mutex_lock(&cma->lock);
> - bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
> - bitmap_maxno, start, bitmap_count, mask);
> + bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
> + bitmap_maxno, start, bitmap_count, mask,
> + offset);
> if (bitmap_no >= bitmap_maxno) {
> mutex_unlock(&cma->lock);
> break;
> --
> 1.9.1
>

--
Best regards, _ _
.o. | Liege of Serenely Enlightened Majesty of o' \,=./ `o
..o | Computer Science, Michał “mina86” Nazarewicz (o o)
ooo +--<[email protected]>--<xmpp:[email protected]>--ooO--(_)--Ooo--