2020-08-18 09:22:08

by Gerd Hoffmann

Subject: [PATCH v2 1/2] drm: allow limiting the scatter list size.

Add a max_segment argument to drm_prime_pages_to_sg(). When set, pass it
through to the __sg_alloc_table_from_pages() call; otherwise use
SCATTERLIST_MAX_SEGMENT.

Also add a max_segment field to struct drm_device and pass it to
drm_prime_pages_to_sg() calls in drivers and helpers.
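
A usage sketch (illustrative only; the limit value and init context are
made up and not part of this patch): a driver caps the segment size once
at init time, and every exporter then picks the limit up through the
device pointer:

	/* hypothetical driver init code */
	dev->max_segment = SZ_1M;	/* 0 keeps the default */

	/* exporters simply forward the device limit */
	sgt = drm_prime_pages_to_sg(pages, nr_pages, dev->max_segment);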

v2: place max_segment in struct drm_device, not the gem object.

Signed-off-by: Gerd Hoffmann <[email protected]>
---
include/drm/drm_device.h | 8 ++++++++
include/drm/drm_prime.h | 3 ++-
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 3 ++-
drivers/gpu/drm/drm_gem_shmem_helper.c | 3 ++-
drivers/gpu/drm/drm_prime.c | 10 +++++++---
drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3 ++-
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 3 ++-
drivers/gpu/drm/msm/msm_gem.c | 3 ++-
drivers/gpu/drm/msm/msm_gem_prime.c | 3 ++-
drivers/gpu/drm/nouveau/nouveau_prime.c | 3 ++-
drivers/gpu/drm/radeon/radeon_prime.c | 3 ++-
drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 6 ++++--
drivers/gpu/drm/tegra/gem.c | 3 ++-
drivers/gpu/drm/vgem/vgem_drv.c | 3 ++-
drivers/gpu/drm/xen/xen_drm_front_gem.c | 3 ++-
15 files changed, 43 insertions(+), 17 deletions(-)

diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 0988351d743c..47cb547a8115 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -329,6 +329,14 @@ struct drm_device {
 	 */
 	struct drm_fb_helper *fb_helper;
 
+	/**
+	 * @max_segment:
+	 *
+	 * Max size for scatter list segments. When unset the default
+	 * (SCATTERLIST_MAX_SEGMENT) is used.
+	 */
+	size_t max_segment;
+
 	/* Everything below here is for legacy driver, never use! */
 	/* private: */
 #if IS_ENABLED(CONFIG_DRM_LEGACY)
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 9af7422b44cf..2c3689435cb4 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -88,7 +88,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages,
+				       size_t max_segment);
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
 				     int flags);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 519ce4427fce..8f6a647757e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -303,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
-					    bo->tbo.num_pages);
+					    bo->tbo.num_pages,
+					    obj->dev->max_segment);
 		if (IS_ERR(sgt))
 			return sgt;

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4b7cfbac4daa..8f47b41b0b2f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -656,7 +656,8 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)

 	WARN_ON(shmem->base.import_attach);
 
-	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT,
+				     obj->dev->max_segment);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1693aa7c14b5..27c783fd6633 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -802,7 +802,8 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
  *
  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
  */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages,
+				       size_t max_segment)
 {
 	struct sg_table *sg = NULL;
 	int ret;
@@ -813,8 +814,11 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
 		goto out;
 	}
 
-	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
 	if (ret)
 		goto out;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f06e19e7be04..90654246b335 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt;
 
-	sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+	sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages,
+				    etnaviv_obj->base.dev->max_segment);
 	if (IS_ERR(sgt)) {
 		dev_err(dev->dev, "failed to allocate sgt: %ld\n",
 			PTR_ERR(sgt));
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 6d9e5c3c4dd5..f65be0fffb3d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -19,7 +19,8 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
 		return ERR_PTR(-EINVAL);
 
-	return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+	return drm_prime_pages_to_sg(etnaviv_obj->pages, npages,
+				     obj->dev->max_segment);
 }
 
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b2f49152b4d4..dbf1437c3dac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -126,7 +126,8 @@ static struct page **get_pages(struct drm_gem_object *obj)

 	msm_obj->pages = p;
 
-	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	msm_obj->sgt = drm_prime_pages_to_sg(p, npages,
+					     obj->dev->max_segment);
 	if (IS_ERR(msm_obj->sgt)) {
 		void *ptr = ERR_CAST(msm_obj->sgt);

diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index d7c8948427fe..6337cd1f9428 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -19,7 +19,8 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
 		return NULL;
 
-	return drm_prime_pages_to_sg(msm_obj->pages, npages);
+	return drm_prime_pages_to_sg(msm_obj->pages, npages,
+				     obj->dev->max_segment);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index bae6a3eccee0..dd0ff032ae16 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -32,7 +32,8 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int npages = nvbo->bo.num_pages;
 
-	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages,
+				     obj->dev->max_segment);
 }
 
 void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b906e8fbd5f3..61a3fe147489 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -36,7 +36,8 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	struct radeon_bo *bo = gem_to_radeon_bo(obj);
 	int npages = bo->tbo.num_pages;
 
-	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages,
+				     obj->dev->max_segment);
 }
 
 void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b9275ba7c5a5..5ddb2d31a607 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)

 	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
 
-	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages,
+					    rk_obj->base.dev->max_segment);
 	if (IS_ERR(rk_obj->sgt)) {
 		ret = PTR_ERR(rk_obj->sgt);
 		goto err_put_pages;
@@ -442,7 +443,8 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	int ret;
 
 	if (rk_obj->pages)
-		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages,
+					     obj->dev->max_segment);
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 723df142a981..a0abde747e95 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -284,7 +284,8 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)

 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages,
+					bo->gem.dev->max_segment);
 	if (IS_ERR(bo->sgt)) {
 		err = PTR_ERR(bo->sgt);
 		goto put_pages;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 313339bbff90..045461dc6319 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -321,7 +321,8 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 
-	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT,
+				     obj->dev->max_segment);
 }
 
 static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index f0b85e094111..61a8c1a9fb04 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -179,7 +179,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
 	if (!xen_obj->pages)
 		return ERR_PTR(-ENOMEM);
 
-	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages,
+				     gem_obj->dev->max_segment);
 }
 
 struct drm_gem_object *
--
2.18.4


2020-09-01 07:41:59

by Daniel Vetter

Subject: Re: [PATCH v2 1/2] drm: allow limiting the scatter list size.

On Tue, Aug 18, 2020 at 11:20:16AM +0200, Gerd Hoffmann wrote:
> Add a max_segment argument to drm_prime_pages_to_sg(). When set, pass it
> through to the __sg_alloc_table_from_pages() call; otherwise use
> SCATTERLIST_MAX_SEGMENT.
>
> Also add a max_segment field to struct drm_device and pass it to
> drm_prime_pages_to_sg() calls in drivers and helpers.
>
> v2: place max_segment in struct drm_device, not the gem object.
>
> Signed-off-by: Gerd Hoffmann <[email protected]>
> ---
> [...]
>
> diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
> index 0988351d743c..47cb547a8115 100644
> --- a/include/drm/drm_device.h
> +++ b/include/drm/drm_device.h
> @@ -329,6 +329,14 @@ struct drm_device {
>  	 */
>  	struct drm_fb_helper *fb_helper;
>  
> +	/**
> +	 * @max_segment:
> +	 *
> +	 * Max size for scatter list segments. When unset the default
> +	 * (SCATTERLIST_MAX_SEGMENT) is used.
> +	 */
> +	size_t max_segment;

Is there no better place for this than "at the bottom"? drm_device is a
huge structure; piling stuff up randomly doesn't make it better :-)

I think ideally we'd have a gem substruct like we have on the modeset side
at least.
-Daniel

> [...]

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

2020-09-07 06:40:23

by Gerd Hoffmann

Subject: Re: [PATCH v2 1/2] drm: allow limiting the scatter list size.

> > +	/**
> > +	 * @max_segment:
> > +	 *
> > +	 * Max size for scatter list segments. When unset the default
> > +	 * (SCATTERLIST_MAX_SEGMENT) is used.
> > +	 */
> > +	size_t max_segment;
>
> Is there no better place for this than "at the bottom"? drm_device is a
> huge structure; piling stuff up randomly doesn't make it better :-)

Moved next to the other gem fields for now (v3 posted).

> I think ideally we'd have a gem substruct like we have on the modeset side
> at least.

Phew, that'll be quite some churn in the tree. And there aren't that many
gem-related fields in struct drm_device.

So you are looking for something like below (header changes only)?

take care,
Gerd

diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index c455ef404ca6..950167ede98a 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -299,22 +299,8 @@ struct drm_device {
 	/** @mode_config: Current mode config */
 	struct drm_mode_config mode_config;
 
-	/** @object_name_lock: GEM information */
-	struct mutex object_name_lock;
-
-	/** @object_name_idr: GEM information */
-	struct idr object_name_idr;
-
-	/** @vma_offset_manager: GEM information */
-	struct drm_vma_offset_manager *vma_offset_manager;
-
-	/**
-	 * @max_segment:
-	 *
-	 * Max size for scatter list segments for GEM objects. When
-	 * unset the default (SCATTERLIST_MAX_SEGMENT) is used.
-	 */
-	size_t max_segment;
+	/** @gem_config: Current GEM config */
+	struct drm_gem_config gem_config;
 
 	/** @vram_mm: VRAM MM memory manager */
 	struct drm_vram_mm *vram_mm;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 337a48321705..74129fb29fb8 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -39,6 +39,25 @@

 #include <drm/drm_vma_manager.h>
 
+struct drm_gem_config {
+	/** @object_name_lock: GEM information */
+	struct mutex object_name_lock;
+
+	/** @object_name_idr: GEM information */
+	struct idr object_name_idr;
+
+	/** @vma_offset_manager: GEM information */
+	struct drm_vma_offset_manager *vma_offset_manager;
+
+	/**
+	 * @max_segment:
+	 *
+	 * Max size for scatter list segments for GEM objects. When
+	 * unset the default (SCATTERLIST_MAX_SEGMENT) is used.
+	 */
+	size_t max_segment;
+};
+
 struct drm_gem_object;
 
 /**
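
Callers would then read the limit through the substruct; untested
sketch, assuming the field moves into drm_gem_config as above:

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT,
				     obj->dev->gem_config.max_segment);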

2020-09-07 07:16:28

by Daniel Vetter

Subject: Re: [PATCH v2 1/2] drm: allow limiting the scatter list size.

On Mon, Sep 7, 2020 at 8:39 AM Gerd Hoffmann <[email protected]> wrote:
>
> > > +	/**
> > > +	 * @max_segment:
> > > +	 *
> > > +	 * Max size for scatter list segments. When unset the default
> > > +	 * (SCATTERLIST_MAX_SEGMENT) is used.
> > > +	 */
> > > +	size_t max_segment;
> >
> > Is there no better place for this than "at the bottom"? drm_device is a
> > huge structure; piling stuff up randomly doesn't make it better :-)
>
> Moved next to the other gem fields for now (v3 posted).
>
> > I think ideally we'd have a gem substruct like we have on the modeset side
> > at least.
>
> Phew, that'll be quite some churn in the tree. And there aren't that many
> gem-related fields in struct drm_device.
>
> So you are looking for something like below (header changes only)?

Hm yeah it's a lot less than I thought. And yes I think that would be neat.
-Daniel

> [...]

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch