From: John Stultz <[email protected]>
Date: 2021-02-05 08:12:23

Subject: [RFC][PATCH v6 6/7] dma-buf: system_heap: Add drm pagepool support to system heap

Utilize the drm pagepool code to improve allocation
performance by recycling freed pages rather than returning
them straight to the buddy allocator.

This is similar to the ION pagepool usage, but reuses
generic code instead of a custom implementation.
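
In condensed form, the pool plumbing added here looks roughly
like the sketch below (illustrative helper names; a single pool
is shown where the patch keeps one pool per order in orders[];
error handling trimmed). The drm_page_pool calls are the ones
introduced earlier in this series, as used in the diff below:

	static struct drm_page_pool *pool;

	/* Callback the pool invokes to really free pages when it
	 * shrinks; returns the number of pages released. */
	static int free_cb(struct page *p, unsigned int order)
	{
		__free_pages(p, order);
		return 1 << order;
	}

	static int heap_pool_init(unsigned int order)
	{
		pool = drm_page_pool_create(order, free_cb);
		return IS_ERR(pool) ? PTR_ERR(pool) : 0;
	}

	/* Allocation path: prefer a recycled page, fall back to
	 * the buddy allocator. */
	static struct page *heap_alloc_page(gfp_t gfp, unsigned int order)
	{
		struct page *page = drm_page_pool_fetch(pool);

		return page ? page : alloc_pages(gfp, order);
	}

	/* Free path: the buffer is zeroed first, then its pages are
	 * cached in the pool instead of being freed. */
	static void heap_free_page(struct page *page)
	{
		drm_page_pool_add(pool, page);
	}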

Cc: Daniel Vetter <[email protected]>
Cc: Christian Koenig <[email protected]>
Cc: Sumit Semwal <[email protected]>
Cc: Liam Mark <[email protected]>
Cc: Chris Goldsworthy <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Brian Starkey <[email protected]>
Cc: Hridya Valsaraju <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Sandeep Patil <[email protected]>
Cc: Daniel Mentz <[email protected]>
Cc: Ørjan Eide <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Ezequiel Garcia <[email protected]>
Cc: Simon Ser <[email protected]>
Cc: James Jones <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: John Stultz <[email protected]>
---
v2:
* Fix build issue caused by selecting PAGE_POOL w/o NET
as Reported-by: kernel test robot <[email protected]>
v3:
* Simplify the page zeroing logic a bit by using kmap_atomic
instead of vmap, as suggested by Daniel Mentz (see the sketch
after this changelog)
v5:
* Shift away from the networking page pool completely to the
dmabuf page pool implementation
v6:
* Switch again to using the drm_page_pool code shared w/
ttm_pool
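
The v3 zeroing simplification boils down to mapping and
clearing one page at a time instead of vmap()ing the whole
buffer first; the final form is system_heap_zero_buffer() in
the diff below:

	/* sgt and piter as declared in system_heap_zero_buffer() */
	for_each_sgtable_page(sgt, &piter, 0) {
		void *vaddr = kmap_atomic(sg_page_iter_page(&piter));

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);
	}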
---
drivers/dma-buf/heaps/Kconfig | 1 +
drivers/dma-buf/heaps/system_heap.c | 56 +++++++++++++++++++++++++++--
2 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index f7aef8bc7119..7e28934e0def 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -4,6 +4,7 @@ config DMABUF_HEAPS_DEFERRED_FREE
config DMABUF_HEAPS_SYSTEM
bool "DMA-BUF System Heap"
depends on DMABUF_HEAPS
+ select DRM_PAGE_POOL
help
Choose this option to enable the system dmabuf heap. The system heap
is backed by pages from the buddy allocator. If in doubt, say Y.
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 17e0e9a68baf..6d39e9f32e36 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>

+#include <drm/page_pool.h>
+
static struct dma_heap *sys_heap;

struct system_heap_buffer {
@@ -53,6 +55,7 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
*/
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
+struct drm_page_pool *pools[NUM_ORDERS];

static struct sg_table *dup_sg_table(struct sg_table *table)
{
@@ -281,18 +284,49 @@ static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
dma_buf_map_clear(map);
}

+static int system_heap_free_pages(struct page *p, unsigned int order)
+{
+ __free_pages(p, order);
+ return 1 << order;
+}
+
+static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
+{
+ struct sg_table *sgt = &buffer->sg_table;
+ struct sg_page_iter piter;
+ struct page *p;
+ void *vaddr;
+ int ret = 0;
+
+ for_each_sgtable_page(sgt, &piter, 0) {
+ p = sg_page_iter_page(&piter);
+ vaddr = kmap_atomic(p);
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ }
+
+ return ret;
+}
+
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table;
struct scatterlist *sg;
- int i;
+ int i, j;
+
+ /* Zero the buffer pages before adding back to the pool */
+ system_heap_zero_buffer(buffer);

table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);

- __free_pages(page, compound_order(page));
+ for (j = 0; j < NUM_ORDERS; j++) {
+ if (compound_order(page) == orders[j])
+ break;
+ }
+ drm_page_pool_add(pools[j], page);
}
sg_free_table(table);
kfree(buffer);
@@ -323,7 +357,9 @@ static struct page *alloc_largest_available(unsigned long size,
if (max_order < orders[i])
continue;

- page = alloc_pages(order_flags[i], orders[i]);
+ page = drm_page_pool_fetch(pools[i]);
+ if (!page)
+ page = alloc_pages(order_flags[i], orders[i]);
if (!page)
continue;
return page;
@@ -428,6 +464,20 @@ static const struct dma_heap_ops system_heap_ops = {
static int system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ pools[i] = drm_page_pool_create(orders[i],
+ system_heap_free_pages);
+ if (IS_ERR(pools[i])) {
+ int j;
+
+ pr_err("%s: page pool creation failed!\n", __func__);
+ for (j = 0; j < i; j++)
+ drm_page_pool_destroy(pools[j]);
+ return PTR_ERR(pools[i]);
+ }
+ }

exp_info.name = "system";
exp_info.ops = &system_heap_ops;
--
2.25.1