2022-12-16 20:20:24

by Keith Busch

[permalink] [raw]
Subject: [PATCHv2 11/11] dmapool: link blocks across pages

From: Keith Busch <[email protected]>

The allocated dmapool pages are never freed for the lifetime of the
pool. There is no need for the two level list+stack lookup for finding a
free block since nothing is ever removed from the list. Just use a
simple stack, reducing time complexity to constant.

The implementation inserts the stack linking elements and the dma handle
of the block within itself when freed. This means the smallest possible
dmapool block is increased to at most 16 bytes to accommodate these
fields, but there are no existing users requesting a dma pool smaller
than that anyway.

Removing the list has a significant change in performance. Using the
kernel's micro-benchmarking self test:

Before:

# modprobe dmapool_test
dmapool test: size:16 blocks:8192 time:57282
dmapool test: size:64 blocks:8192 time:172562
dmapool test: size:256 blocks:8192 time:789247
dmapool test: size:1024 blocks:2048 time:371823
dmapool test: size:4096 blocks:1024 time:362237

After:

# modprobe dmapool_test
dmapool test: size:16 blocks:8192 time:24997
dmapool test: size:64 blocks:8192 time:26584
dmapool test: size:256 blocks:8192 time:33542
dmapool test: size:1024 blocks:2048 time:9022
dmapool test: size:4096 blocks:1024 time:6045

The module test allocates quite a few blocks that may not accurately
represent how these pools are used in real life. For a more macro-level
benchmark, running fio high-depth + high-batched on nvme, this patch
shows submission and completion latency reduced by ~100usec each, 1%
IOPs improvement, and perf record's time spent in dma_pool_alloc/free
were reduced by half.

Signed-off-by: Keith Busch <[email protected]>
---
v1->v2:

Applied feedback comments from Tony:
Updated data structure description comment
Used consistent size_t for accounting variables
Fixed block initialization for odd alignments

mm/dmapool.c | 215 ++++++++++++++++++++++++++-------------------------
1 file changed, 109 insertions(+), 106 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index f5b79c3268856..d26a0751dee63 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -15,7 +15,7 @@
* represented by the 'struct dma_pool' which keeps a doubly-linked list of
* allocated pages. Each page in the page_list is split into blocks of at
* least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
- * list of free blocks within the page. Used blocks aren't tracked, but we
+ * list of free blocks across all pages. Used blocks aren't tracked, but we
* keep a count of how many are currently allocated from each page.
*/

@@ -40,13 +40,22 @@
#define DMAPOOL_DEBUG 1
#endif

+struct dma_block {
+ struct dma_block *next_block;
+ dma_addr_t dma;
+};
+
struct dma_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
struct device *dev;
+ struct dma_block *next_block;
unsigned int size;
unsigned int allocation;
unsigned int boundary;
+ size_t nr_blocks;
+ size_t nr_active;
+ size_t nr_pages;
char name[32];
struct list_head pools;
};
@@ -55,8 +64,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
- unsigned int in_use;
- unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
@@ -64,30 +71,18 @@ static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int size;
- struct dma_page *page;
struct dma_pool *pool;
+ unsigned size;

size = sysfs_emit(buf, "poolinfo - 0.1\n");

mutex_lock(&pools_lock);
list_for_each_entry(pool, &dev->dma_pools, pools) {
- unsigned pages = 0;
- size_t blocks = 0;
-
- spin_lock_irq(&pool->lock);
- list_for_each_entry(page, &pool->page_list, page_list) {
- pages++;
- blocks += page->in_use;
- }
- spin_unlock_irq(&pool->lock);
-
/* per-pool info, no real statistics yet */
- size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
- pool->name, blocks,
- (size_t) pages *
- (pool->allocation / pool->size),
- pool->size, pages);
+ size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
+ pool->name, pool->nr_active,
+ pool->nr_blocks, pool->size,
+ pool->nr_pages);
}
mutex_unlock(&pools_lock);

@@ -96,6 +91,25 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha

static DEVICE_ATTR_RO(pools);

+static inline struct dma_block *pool_block_pop(struct dma_pool *pool)
+{
+ struct dma_block *block = pool->next_block;
+
+ if (block) {
+ pool->next_block = block->next_block;
+ pool->nr_active++;
+ }
+ return block;
+}
+
+static inline void pool_block_push(struct dma_pool *pool, struct dma_block *block,
+ dma_addr_t dma)
+{
+ block->dma = dma;
+ block->next_block = pool->next_block;
+ pool->next_block = block;
+}
+
/**
* dma_pool_create - Creates a pool of consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
@@ -136,8 +150,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,

if (size == 0 || size > INT_MAX)
return NULL;
- else if (size < 4)
- size = 4;
+ if (size < sizeof(struct dma_block))
+ size = sizeof(struct dma_block);

size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE);
@@ -162,6 +176,10 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
retval->size = size;
retval->boundary = boundary;
retval->allocation = allocation;
+ retval->nr_blocks = 0;
+ retval->nr_active = 0;
+ retval->nr_pages = 0;
+ retval->next_block = NULL;

INIT_LIST_HEAD(&retval->pools);

@@ -199,22 +217,24 @@ EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
- unsigned int offset = 0;
- unsigned int next_boundary = pool->boundary;
-
- page->in_use = 0;
- page->offset = 0;
- do {
- unsigned int next = offset + pool->size;
- if (unlikely((next + pool->size) >= next_boundary)) {
- next = next_boundary;
+ unsigned int next_boundary = pool->boundary, offset = 0;
+ struct dma_block *block;
+
+ while (offset + pool->size <= pool->allocation) {
+ if (offset + pool->size > next_boundary) {
+ offset = next_boundary;
next_boundary += pool->boundary;
+ continue;
}
- *(int *)(page->vaddr + offset) = next;
- offset = next;
- } while (offset < pool->allocation);
+
+ block = page->vaddr + offset;
+ pool_block_push(pool, block, page->dma + offset);
+ offset += pool->size;
+ pool->nr_blocks++;
+ }

list_add(&page->page_list, &pool->page_list);
+ pool->nr_pages++;
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
@@ -236,11 +256,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
return page;
}

-static inline bool is_page_busy(struct dma_page *page)
-{
- return page->in_use != 0;
-}
-
/**
* dma_pool_destroy - destroys a pool of dma memory blocks.
* @pool: dma pool that will be destroyed
@@ -252,7 +267,7 @@ static inline bool is_page_busy(struct dma_page *page)
void dma_pool_destroy(struct dma_pool *pool)
{
struct dma_page *page, *tmp;
- bool empty = false;
+ bool empty = false, busy = false;

if (unlikely(!pool))
return;
@@ -267,13 +282,15 @@ void dma_pool_destroy(struct dma_pool *pool)
device_remove_file(pool->dev, &dev_attr_pools);
mutex_unlock(&pools_reg_lock);

+ if (pool->nr_active) {
+ dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
+ busy = true;
+ }
+
list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
- if (!is_page_busy(page))
+ if (!busy)
dma_free_coherent(pool->dev, pool->allocation,
page->vaddr, page->dma);
- else
- dev_err(pool->dev, "%s %s, %p busy\n", __func__,
- pool->name, page->vaddr);
list_del(&page->page_list);
kfree(page);
}
@@ -282,18 +299,18 @@ void dma_pool_destroy(struct dma_pool *pool)
}
EXPORT_SYMBOL(dma_pool_destroy);

-static inline void pool_check_block(struct dma_pool *pool, void *retval,
- unsigned int offset, gfp_t mem_flags)
+static inline void pool_check_block(struct dma_pool *pool, struct dma_block *block,
+ gfp_t mem_flags)
{
-#ifdef DMAPOOL_DEBUG
+#ifdef DMAPOOL_DEBUG
+ u8 *data = (void *)block;
int i;
- u8 *data = retval;
- /* page->offset is stored in first 4 bytes */
- for (i = sizeof(offset); i < pool->size; i++) {
+
+ for (i = sizeof(struct dma_block); i < pool->size; i++) {
if (data[i] == POOL_POISON_FREED)
continue;
- dev_err(pool->dev, "%s %s, %p (corrupted)\n",
- __func__, pool->name, retval);
+ dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
+ pool->name, block);

/*
* Dump the first 4 bytes even if they are not
@@ -303,8 +320,9 @@ static inline void pool_check_block(struct dma_pool *pool, void *retval,
data, pool->size, 1);
break;
}
+
if (!want_init_on_alloc(mem_flags))
- memset(retval, POOL_POISON_ALLOCATED, pool->size);
+ memset(block, POOL_POISON_ALLOCATED, pool->size);
#endif
}

@@ -321,44 +339,41 @@ static inline void pool_check_block(struct dma_pool *pool, void *retval,
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
- unsigned long flags;
+ struct dma_block *block;
struct dma_page *page;
- unsigned int offset;
- void *retval;
+ unsigned long flags;

might_alloc(mem_flags);

spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (page->offset < pool->allocation)
- goto ready;
- }
-
- /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
- spin_unlock_irqrestore(&pool->lock, flags);
+ block = pool_block_pop(pool);
+ if (!block) {
+ /*
+ * pool_alloc_page() might sleep, so temporarily drop
+ * &pool->lock
+ */
+ spin_unlock_irqrestore(&pool->lock, flags);

- page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
- if (!page)
- return NULL;
+ page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
+ if (!page)
+ return NULL;

- spin_lock_irqsave(&pool->lock, flags);
- pool_initialise_page(pool, page);
- ready:
- page->in_use++;
- offset = page->offset;
- page->offset = *(int *)(page->vaddr + offset);
- retval = offset + page->vaddr;
- *handle = offset + page->dma;
- pool_check_block(pool, retval, offset, mem_flags);
+ spin_lock_irqsave(&pool->lock, flags);
+ pool_initialise_page(pool, page);
+ block = pool_block_pop(pool);
+ }
spin_unlock_irqrestore(&pool->lock, flags);

+ *handle = block->dma;
+ pool_check_block(pool, block, mem_flags);
if (want_init_on_alloc(mem_flags))
- memset(retval, 0, pool->size);
+ memset(block, 0, pool->size);

- return retval;
+ return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

+#ifdef DMAPOOL_DEBUG
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
struct dma_page *page;
@@ -372,33 +387,35 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
return NULL;
}

-#ifdef DMAPOOL_DEBUG
-static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
- void *vaddr)
+static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
+ dma_addr_t dma)
{
- unsigned int chain = page->offset;
+ struct dma_block *block = pool->next_block;
+ struct dma_page *page;

- if ((dma - page->dma) != offset) {
- dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+ page = pool_find_page(pool, dma);
+ if (!page) {
+ dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
__func__, pool->name, vaddr, &dma);
return true;
}

- while (chain < pool->allocation) {
- if (chain != offset) {
- chain = *(int *)(page->vaddr + chain);
+ while (block) {
+ if (block != vaddr) {
+ block = block->next_block;
continue;
}
dev_err(pool->dev, "%s %s, dma %pad already free\n",
__func__, pool->name, &dma);
return true;
}
+
memset(vaddr, POOL_POISON_FREED, pool->size);
return false;
}
#else
-static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
- void *vaddr)
+static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
+ dma_addr_t dma)
{
if (want_init_on_free())
memset(vaddr, 0, pool->size);
@@ -417,28 +434,14 @@ static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
*/
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
- struct dma_page *page;
+ struct dma_block *block = vaddr;
unsigned long flags;
- unsigned int offset;

spin_lock_irqsave(&pool->lock, flags);
- page = pool_find_page(pool, dma);
- if (!page) {
- spin_unlock_irqrestore(&pool->lock, flags);
- dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
- __func__, pool->name, vaddr, &dma);
- return;
+ if (!pool_block_err(pool, vaddr, dma)) {
+ pool_block_push(pool, block, dma);
+ pool->nr_active--;
}
-
- offset = vaddr - page->vaddr;
- if (pool_page_err(pool, page, vaddr)) {
- spin_unlock_irqrestore(&pool->lock, flags);
- return;
- }
-
- page->in_use--;
- *(int *)vaddr = page->offset;
- page->offset = offset;
spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
--
2.30.2


2022-12-17 02:49:28

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

Hi Keith,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.1]
[cannot apply to next-20221216]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/dmapool-enhancements/20221217-041918
patch link: https://lore.kernel.org/r/20221216201625.2362737-12-kbusch%40meta.com
patch subject: [PATCHv2 11/11] dmapool: link blocks across pages
config: i386-randconfig-a013
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/360d95fe292a507a2035bbedcd5e7a1c0c9027b2
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Keith-Busch/dmapool-enhancements/20221217-041918
git checkout 360d95fe292a507a2035bbedcd5e7a1c0c9027b2
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

>> mm/dmapool.c:83:23: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
pool->name, pool->nr_active,
^~~~~~~~~~~~~~~
mm/dmapool.c:84:11: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
pool->nr_blocks, pool->size,
^~~~~~~~~~~~~~~
mm/dmapool.c:85:11: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
pool->nr_pages);
^~~~~~~~~~~~~~
3 warnings generated.


vim +83 mm/dmapool.c

71
72 static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
73 {
74 struct dma_pool *pool;
75 unsigned size;
76
77 size = sysfs_emit(buf, "poolinfo - 0.1\n");
78
79 mutex_lock(&pools_lock);
80 list_for_each_entry(pool, &dev->dma_pools, pools) {
81 /* per-pool info, no real statistics yet */
82 size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
> 83 pool->name, pool->nr_active,
84 pool->nr_blocks, pool->size,
85 pool->nr_pages);
86 }
87 mutex_unlock(&pools_lock);
88
89 return size;
90 }
91

--
0-DAY CI Kernel Test Service
https://01.org/lkp


Attachments:
(No filename) (3.26 kB)
config (159.53 kB)
Download all attachments

2022-12-17 04:03:51

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

Hi Keith,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.1]
[cannot apply to next-20221216]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/dmapool-enhancements/20221217-041918
patch link: https://lore.kernel.org/r/20221216201625.2362737-12-kbusch%40meta.com
patch subject: [PATCHv2 11/11] dmapool: link blocks across pages
config: i386-randconfig-a001
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/360d95fe292a507a2035bbedcd5e7a1c0c9027b2
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Keith-Busch/dmapool-enhancements/20221217-041918
git checkout 360d95fe292a507a2035bbedcd5e7a1c0c9027b2
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=i386 olddefconfig
make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

mm/dmapool.c: In function 'pools_show':
>> mm/dmapool.c:82:60: warning: format '%ld' expects argument of type 'long int', but argument 5 has type 'size_t' {aka 'unsigned int'} [-Wformat=]
82 | size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
| ~~~^
| |
| long int
| %4d
83 | pool->name, pool->nr_active,
| ~~~~~~~~~~~~~~~
| |
| size_t {aka unsigned int}
mm/dmapool.c:82:65: warning: format '%ld' expects argument of type 'long int', but argument 6 has type 'size_t' {aka 'unsigned int'} [-Wformat=]
82 | size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
| ~~~^
| |
| long int
| %4d
83 | pool->name, pool->nr_active,
84 | pool->nr_blocks, pool->size,
| ~~~~~~~~~~~~~~~
| |
| size_t {aka unsigned int}
mm/dmapool.c:82:74: warning: format '%ld' expects argument of type 'long int', but argument 8 has type 'size_t' {aka 'unsigned int'} [-Wformat=]
82 | size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
| ~~~^
| |
| long int
| %2d
......
85 | pool->nr_pages);
| ~~~~~~~~~~~~~~
| |
| size_t {aka unsigned int}


vim +82 mm/dmapool.c

71
72 static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
73 {
74 struct dma_pool *pool;
75 unsigned size;
76
77 size = sysfs_emit(buf, "poolinfo - 0.1\n");
78
79 mutex_lock(&pools_lock);
80 list_for_each_entry(pool, &dev->dma_pools, pools) {
81 /* per-pool info, no real statistics yet */
> 82 size += sysfs_emit_at(buf, size, "%-16s %4ld %4ld %4u %2ld\n",
83 pool->name, pool->nr_active,
84 pool->nr_blocks, pool->size,
85 pool->nr_pages);
86 }
87 mutex_unlock(&pools_lock);
88
89 return size;
90 }
91

--
0-DAY CI Kernel Test Service
https://01.org/lkp


Attachments:
(No filename) (4.82 kB)
config (156.73 kB)
Download all attachments

2022-12-23 17:48:02

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

On Fri, Dec 16, 2022 at 12:16:25PM -0800, Keith Busch wrote:
> unsigned int size;
> unsigned int allocation;
> unsigned int boundary;
> + size_t nr_blocks;
> + size_t nr_active;
> + size_t nr_pages;

Should these be unsigned int like the counters above?

> +static inline struct dma_block *pool_block_pop(struct dma_pool *pool)
> +{
> + struct dma_block *block = pool->next_block;
> +
> + if (block) {
> + pool->next_block = block->next_block;
> + pool->nr_active++;
> + }
> + return block;
> +}
> +
> +static inline void pool_block_push(struct dma_pool *pool, struct dma_block *block,
> + dma_addr_t dma)
> +{
> + block->dma = dma;
> + block->next_block = pool->next_block;
> + pool->next_block = block;
> +}

Any point in marking these inline vs just letting the compiler do
its job?

> @@ -162,6 +176,10 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
> retval->size = size;
> retval->boundary = boundary;
> retval->allocation = allocation;
> + retval->nr_blocks = 0;
> + retval->nr_active = 0;
> + retval->nr_pages = 0;
> + retval->next_block = NULL;

Maybe just switch to kzmalloc so that you don't have to bother
initializing individual fields. It's not like dma_pool_create is
called from anything near a fast path.

> static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
> {
> + unsigned int next_boundary = pool->boundary, offset = 0;
> + struct dma_block *block;
> +
> + while (offset + pool->size <= pool->allocation) {
> + if (offset + pool->size > next_boundary) {
> + offset = next_boundary;
> next_boundary += pool->boundary;
> + continue;
> }
> +
> + block = page->vaddr + offset;
> + pool_block_push(pool, block, page->dma + offset);

So I guess with this pool_initialise_page needs to be called under
the lock anyway, but just doing it silently in the previous patch
seems a bit odd.

> +static inline void pool_check_block(struct dma_pool *pool, struct dma_block *block,
> + gfp_t mem_flags)

I didn't spot this earlier, but inline on a relatively expensive debug
helper is a bit silly.

Otherwise this looks like a nice improvement by using a better and
simpler data structure.

2022-12-23 17:48:32

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

On Fri, Dec 23, 2022 at 12:08:54PM -0500, Tony Battersby wrote:
> I previously recommended that they should be size_t because they are
> counting the number of objects in the entire pool, which can be greater
> than 2^32. See patch 4 "dmapool: cleanup integer types".

Oh, ok.

> However the
> kernel test robot has complained that some of the printk format
> specifiers need to be changed to match the size_t type.

Yes, they do.

2022-12-23 18:07:57

by Tony Battersby

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

On 12/23/22 11:58, Christoph Hellwig wrote:
> On Fri, Dec 16, 2022 at 12:16:25PM -0800, Keith Busch wrote:
>> unsigned int size;
>> unsigned int allocation;
>> unsigned int boundary;
>> + size_t nr_blocks;
>> + size_t nr_active;
>> + size_t nr_pages;
> Should these be unsigned int like the counters above?

I previously recommended that they should be size_t because they are
counting the number of objects in the entire pool, which can be greater
than 2^32.   See patch 4 "dmapool: cleanup integer types".  However the
kernel test robot has complained that some of the printk format
specifiers need to be changed to match the size_t type.

Tony Battersby
Cybernetics

2022-12-24 15:44:17

by Keith Busch

[permalink] [raw]
Subject: Re: [PATCHv2 11/11] dmapool: link blocks across pages

On Fri, Dec 23, 2022 at 09:15:23AM -0800, Christoph Hellwig wrote:
> On Fri, Dec 23, 2022 at 12:08:54PM -0500, Tony Battersby wrote:
> > However the
> > kernel test robot has complained that some of the printk format
> > specifiers need to be changed to match the size_t type.
>
> Yes, they do.

Yeah, the type change was a last minute addition that and I messed it
up. Thanks for the other reviews, though, I didn't want to respin this
just for print formats!