2024-04-03 10:31:36

by Shawn Sung (宋孝謙)

[permalink] [raw]
Subject: [PATCH v5 2/9] drm/mediatek: Add secure buffer control flow to mtk_drm_gem

From: "Jason-JH.Lin" <[email protected]>

Add secure buffer control flow to mtk_drm_gem.

When user space passes the DRM_MTK_GEM_CREATE_ENCRYPTED flag and a size
to create a mtk_drm_gem object, mtk_drm_gem will find a DMA buffer of
matching size from the secure dma-heap and bind it to the mtk_drm_gem object.

Signed-off-by: Jason-JH.Lin <[email protected]>
Signed-off-by: Hsiao Chien Sung <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_gem.c | 85 +++++++++++++++++++++++++++++-
drivers/gpu/drm/mediatek/mtk_gem.h | 4 ++
2 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index e59e0727717b7..ec34d02c14377 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -4,6 +4,8 @@
*/

#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
#include <drm/mediatek_drm.h>

#include <drm/drm.h>
@@ -102,6 +104,81 @@ struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
return ERR_PTR(ret);
}

+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+ const char *heap, size_t size)
+{
+ struct mtk_drm_private *priv = dev->dev_private;
+ struct mtk_gem_obj *mtk_gem;
+ struct drm_gem_object *obj;
+ struct dma_heap *dma_heap;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct iosys_map map = {};
+ int ret;
+
+ mtk_gem = mtk_gem_init(dev, size);
+ if (IS_ERR(mtk_gem))
+ return ERR_CAST(mtk_gem);
+
+ obj = &mtk_gem->base;
+
+ dma_heap = dma_heap_find(heap);
+ if (!dma_heap) {
+ DRM_ERROR("heap find fail\n");
+ goto err_gem_free;
+ }
+ dma_buf = dma_heap_buffer_alloc(dma_heap, size,
+ O_RDWR | O_CLOEXEC, DMA_HEAP_VALID_HEAP_FLAGS);
+ if (IS_ERR(dma_buf)) {
+ DRM_ERROR("buffer alloc fail\n");
+ dma_heap_put(dma_heap);
+ goto err_gem_free;
+ }
+ dma_heap_put(dma_heap);
+
+ attach = dma_buf_attach(dma_buf, priv->dma_dev);
+ if (IS_ERR(attach)) {
+ DRM_ERROR("attach fail, return\n");
+ dma_buf_put(dma_buf);
+ goto err_gem_free;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ DRM_ERROR("map failed, detach and return\n");
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+ goto err_gem_free;
+ }
+ obj->import_attach = attach;
+ mtk_gem->dma_addr = sg_dma_address(sgt->sgl);
+ mtk_gem->sg = sgt;
+ mtk_gem->size = dma_buf->size;
+
+ if (!strcmp(heap, "mtk_svp") || !strcmp(heap, "mtk_svp_cma")) {
+ /* secure buffer can not be mapped */
+ mtk_gem->secure = true;
+ } else {
+ ret = dma_buf_vmap(dma_buf, &map);
+ mtk_gem->kvaddr = map.vaddr;
+ if (ret) {
+ DRM_ERROR("map failed, ret=%d\n", ret);
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+ mtk_gem->kvaddr = NULL;
+ }
+ }
+
+ return mtk_gem;
+
+err_gem_free:
+ drm_gem_object_release(obj);
+ kfree(mtk_gem);
+ return ERR_PTR(-ENOMEM);
+}
+
void mtk_gem_free_object(struct drm_gem_object *obj)
{
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
@@ -229,7 +306,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);

+ mtk_gem->secure = !sg_page(sg->sgl);
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+ mtk_gem->size = attach->dmabuf->size;
mtk_gem->sg = sg;

return &mtk_gem->base;
@@ -304,7 +383,11 @@ int mtk_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_mtk_gem_create *args = data;
int ret;

- mtk_gem = mtk_gem_create(dev, args->size, false);
+ if (args->flags & DRM_MTK_GEM_CREATE_ENCRYPTED)
+ mtk_gem = mtk_gem_create_from_heap(dev, "mtk_svp_cma", args->size);
+ else
+ mtk_gem = mtk_gem_create(dev, args->size, false);
+
if (IS_ERR(mtk_gem))
return PTR_ERR(mtk_gem);

diff --git a/drivers/gpu/drm/mediatek/mtk_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h
index 4d7598220ca8f..75cf50495abe0 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_gem.h
@@ -27,9 +27,11 @@ struct mtk_gem_obj {
void *cookie;
void *kvaddr;
dma_addr_t dma_addr;
+ size_t size;
unsigned long dma_attrs;
struct sg_table *sg;
struct page **pages;
+ bool secure;
};

#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
@@ -37,6 +39,8 @@ struct mtk_gem_obj {
void mtk_gem_free_object(struct drm_gem_object *gem);
struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
bool alloc_kmap);
+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+ const char *heap, size_t size);
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args);
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
--
2.18.0



2024-04-15 09:37:48

by Maxime Ripard

[permalink] [raw]
Subject: Re: [PATCH v5 2/9] drm/mediatek: Add secure buffer control flow to mtk_drm_gem

On Wed, Apr 03, 2024 at 06:26:54PM +0800, Shawn Sung wrote:
> From: "Jason-JH.Lin" <[email protected]>
>
> Add secure buffer control flow to mtk_drm_gem.
>
> When user space passes the DRM_MTK_GEM_CREATE_ENCRYPTED flag and a size
> to create a mtk_drm_gem object, mtk_drm_gem will find a DMA buffer of
> matching size from the secure dma-heap and bind it to the mtk_drm_gem object.
>
> Signed-off-by: Jason-JH.Lin <[email protected]>
> Signed-off-by: Hsiao Chien Sung <[email protected]>
> ---
> drivers/gpu/drm/mediatek/mtk_gem.c | 85 +++++++++++++++++++++++++++++-
> drivers/gpu/drm/mediatek/mtk_gem.h | 4 ++
> 2 files changed, 88 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
> index e59e0727717b7..ec34d02c14377 100644
> --- a/drivers/gpu/drm/mediatek/mtk_gem.c
> +++ b/drivers/gpu/drm/mediatek/mtk_gem.c
> @@ -4,6 +4,8 @@
> */
>
> #include <linux/dma-buf.h>
> +#include <linux/dma-heap.h>
> +#include <uapi/linux/dma-heap.h>
> #include <drm/mediatek_drm.h>
>
> #include <drm/drm.h>
> @@ -102,6 +104,81 @@ struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
> return ERR_PTR(ret);
> }
>
> +struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
> + const char *heap, size_t size)
> +{
> + struct mtk_drm_private *priv = dev->dev_private;
> + struct mtk_gem_obj *mtk_gem;
> + struct drm_gem_object *obj;
> + struct dma_heap *dma_heap;
> + struct dma_buf *dma_buf;
> + struct dma_buf_attachment *attach;
> + struct sg_table *sgt;
> + struct iosys_map map = {};
> + int ret;
> +
> + mtk_gem = mtk_gem_init(dev, size);
> + if (IS_ERR(mtk_gem))
> + return ERR_CAST(mtk_gem);
> +
> + obj = &mtk_gem->base;
> +
> + dma_heap = dma_heap_find(heap);
> + if (!dma_heap) {
> + DRM_ERROR("heap find fail\n");
> + goto err_gem_free;
> + }
> + dma_buf = dma_heap_buffer_alloc(dma_heap, size,
> + O_RDWR | O_CLOEXEC, DMA_HEAP_VALID_HEAP_FLAGS);
> + if (IS_ERR(dma_buf)) {
> + DRM_ERROR("buffer alloc fail\n");
> + dma_heap_put(dma_heap);
> + goto err_gem_free;
> + }
> + dma_heap_put(dma_heap);
> +
> + attach = dma_buf_attach(dma_buf, priv->dma_dev);
> + if (IS_ERR(attach)) {
> + DRM_ERROR("attach fail, return\n");
> + dma_buf_put(dma_buf);
> + goto err_gem_free;
> + }
> +
> + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
> + if (IS_ERR(sgt)) {
> + DRM_ERROR("map failed, detach and return\n");
> + dma_buf_detach(dma_buf, attach);
> + dma_buf_put(dma_buf);
> + goto err_gem_free;
> + }
> + obj->import_attach = attach;
> + mtk_gem->dma_addr = sg_dma_address(sgt->sgl);
> + mtk_gem->sg = sgt;
> + mtk_gem->size = dma_buf->size;
> +
> + if (!strcmp(heap, "mtk_svp") || !strcmp(heap, "mtk_svp_cma")) {
> + /* secure buffer can not be mapped */
> + mtk_gem->secure = true;
> + } else {
> + ret = dma_buf_vmap(dma_buf, &map);
> + mtk_gem->kvaddr = map.vaddr;
> + if (ret) {
> + DRM_ERROR("map failed, ret=%d\n", ret);
> + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
> + dma_buf_detach(dma_buf, attach);
> + dma_buf_put(dma_buf);
> + mtk_gem->kvaddr = NULL;
> + }
> + }
> +
> + return mtk_gem;
> +
> +err_gem_free:
> + drm_gem_object_release(obj);
> + kfree(mtk_gem);
> + return ERR_PTR(-ENOMEM);
> +}
> +
> void mtk_gem_free_object(struct drm_gem_object *obj)
> {
> struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
> @@ -229,7 +306,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
> if (IS_ERR(mtk_gem))
> return ERR_CAST(mtk_gem);
>
> + mtk_gem->secure = !sg_page(sg->sgl);
> mtk_gem->dma_addr = sg_dma_address(sg->sgl);
> + mtk_gem->size = attach->dmabuf->size;
> mtk_gem->sg = sg;
>
> return &mtk_gem->base;
> @@ -304,7 +383,11 @@ int mtk_gem_create_ioctl(struct drm_device *dev, void *data,
> struct drm_mtk_gem_create *args = data;
> int ret;
>
> - mtk_gem = mtk_gem_create(dev, args->size, false);
> + if (args->flags & DRM_MTK_GEM_CREATE_ENCRYPTED)
> + mtk_gem = mtk_gem_create_from_heap(dev, "mtk_svp_cma", args->size);

That heap doesn't exist upstream either. Also, I'm wondering if it's the
right solution there.

From what I can tell, you want to allow creating encrypted buffers from
the TEE. Why do we need this as a DRM ioctl at all? A heap seems like
the perfect solution to do so, and then you just have to import it into
DRM.

I'm also not entirely sure that not having a SG list is enough to
consider the buffer secure. Wouldn't a buffer allocated without a kernel
mapping also be in that situation?

Maxime


Attachments:
(No filename) (4.59 kB)
signature.asc (281.00 B)
Download all attachments