2023-11-22 13:48:31

by Pratyush Brahma

Subject: [PATCH 0/2] Add qcom secure heaps

From: Vijayanand Jitta <[email protected]>

This patch series is based on the mtk patch series [1] that adds
secure heap support.

This patch series adds support for qcom secure heaps, which include a
secure CMA heap and a secure system heap; both use qcom_scm_assign_mem
to secure the memory.

Video would be the user of these heaps, and the corresponding VMIDs
are also added as part of this series.


[1] https://lore.kernel.org/linux-arm-kernel/[email protected]/T/
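
As a rough usage sketch (not part of this series), allocation from
these heaps goes through the standard DMA-BUF heaps uAPI in
<linux/dma-heap.h>; the heap name below is the "secure_cma" heap
registered in patch 1:

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Rough sketch: allocate a dma-buf from the "secure_cma" heap. */
static int alloc_secure_buf(size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/secure_cma", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -errno;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -errno;

	return data.fd;	/* dma-buf fd backed by the secured memory */
}

The returned dma-buf fd is what a client such as the video driver
would consume; once the pages have been assigned away from HLOS, CPU
access to the buffer is not expected to work.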

Vijayanand Jitta (2):
dma-buf: heaps: secure_heap: Add secure ops for CMA heap
dma-buf: heaps: secure_heap: Add qcom secure system heap

drivers/dma-buf/heaps/secure_heap.c | 207 +++++++++++++++++++++++++++-
1 file changed, 204 insertions(+), 3 deletions(-)

--
2.34.1


2023-11-22 13:48:38

by Pratyush Brahma

Subject: [PATCH 1/2] dma-buf: heaps: secure_heap: Add secure ops for CMA heap

From: Vijayanand Jitta <[email protected]>

Add secure ops for CMA heap which would use qcom_scm_assign_mem
to assign the memory to VMID.

Change-Id: I05aff9cb9b7b9668c4352a24bec163b52e38835a
Signed-off-by: Vijayanand Jitta <[email protected]>
---
drivers/dma-buf/heaps/secure_heap.c | 50 +++++++++++++++++++++++++++--
1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
index 8989ad5d03e9..04e2ee000e19 100644
--- a/drivers/dma-buf/heaps/secure_heap.c
+++ b/drivers/dma-buf/heaps/secure_heap.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
+#include <linux/firmware/qcom/qcom_scm.h>

#define TZ_TA_MEM_UUID_MTK "4477588a-8476-11e2-ad15-e41f1390d676"

@@ -83,6 +84,12 @@ struct secure_heap_prv_data {
*/
const int tee_command_id_base;

+ /*
+ * VMID and permissions which will be used by QCOM secure system heap to assign mem.
+ */
+ int vmid;
+ int perm;
+
int (*heap_init)(struct secure_heap *sec_heap);
int (*memory_alloc)(struct secure_heap *sec_heap, struct secure_buffer *sec_buf);
void (*memory_free)(struct secure_heap *sec_heap, struct secure_buffer *sec_buf);
@@ -311,10 +318,47 @@ static void cma_secure_memory_free(struct secure_heap *sec_heap,
cma_release(sec_heap->cma, sec_buf->cma_page, sec_buf->size >> PAGE_SHIFT);
}

-const struct secure_heap_prv_data cma_sec_mem_data = {
+static int secure_heap_qcom_secure_memory(struct secure_heap *sec_heap,
+ struct secure_buffer *sec_buf)
+{
+ struct qcom_scm_vmperm next[1];
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+ const struct secure_heap_prv_data *data = sec_heap->data;
+ int ret = 0;
+
+ next[0].vmid = data->vmid;
+ next[0].perm = data->perm;
+
+
+ ret = qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
+ sec_buf->size, &src_perms,
+ next, 1);
+
+ return ret;
+}
+
+static void secure_heap_qcom_unsecure_memory(struct secure_heap *sec_heap,
+ struct secure_buffer *sec_buf)
+{
+ struct qcom_scm_vmperm next[1];
+ const struct secure_heap_prv_data *data = sec_heap->data;
+ u64 src_perms = BIT(data->vmid);
+
+ next[0].vmid = QCOM_SCM_VMID_HLOS;
+ next[0].perm = QCOM_SCM_PERM_RWX;
+
+ qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
+ sec_buf->size, &src_perms,
+ next, 1);
+}
+
+const struct secure_heap_prv_data qcom_cma_sec_mem_data = {
+ .vmid = QCOM_SCM_VMID_CP_BITSTREAM,
+ .perm = QCOM_SCM_PERM_RW,
.memory_alloc = cma_secure_memory_allocate,
.memory_free = cma_secure_memory_free,
- /* TODO : secure the buffer. */
+ .secure_the_memory = secure_heap_qcom_secure_memory,
+ .unsecure_the_memory = secure_heap_qcom_unsecure_memory,
};

static int secure_heap_secure_memory_allocate(struct secure_heap *sec_heap,
@@ -529,7 +573,7 @@ static struct secure_heap secure_heaps[] = {
{
.name = "secure_cma",
.mem_type = SECURE_MEMORY_TYPE_CMA,
- .data = &cma_sec_mem_data,
+ .data = &qcom_cma_sec_mem_data,
},
{
.name = "secure_mtk_cm",
--
2.34.1

2023-11-22 13:48:43

by Pratyush Brahma

Subject: [PATCH 2/2] dma-buf: heaps: secure_heap: Add qcom secure system heap

From: Vijayanand Jitta <[email protected]>

Add secure system for Pixel and Non pixel video usecases, this
allocates from system heap and secures using qcom_scm_aasign_mem.

Change-Id: If0702f85bff651843c6a5c83694043364229e66b
Signed-off-by: Vijayanand Jitta <[email protected]>
---
drivers/dma-buf/heaps/secure_heap.c | 163 +++++++++++++++++++++++++++-
1 file changed, 160 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
index 04e2ee000e19..cdcf4b3f5333 100644
--- a/drivers/dma-buf/heaps/secure_heap.c
+++ b/drivers/dma-buf/heaps/secure_heap.c
@@ -58,6 +58,11 @@ enum secure_memory_type {
* protect it, then the detail memory management also is inside the TEE.
*/
SECURE_MEMORY_TYPE_MTK_CM_CMA = 2,
+ /*
+ * QCOM secure system heap, use system heap to alloc/free.
+ * and use qcom_scm_assign_mem to secure the memory.
+ */
+ SECURE_MEMORY_TYPE_QCOM_SYSTEM = 3,
};

struct secure_buffer {
@@ -69,6 +74,7 @@ struct secure_buffer {
*/
u32 sec_handle;
struct page *cma_page;
+ struct sg_table sg_table;
};

#define TEE_MEM_COMMAND_ID_BASE_MTK 0x10000
@@ -329,11 +335,26 @@ static int secure_heap_qcom_secure_memory(struct secure_heap *sec_heap,
next[0].vmid = data->vmid;
next[0].perm = data->perm;

-
- ret = qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
+ if (sec_heap->mem_type == SECURE_MEMORY_TYPE_CMA) {
+ ret = qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
sec_buf->size, &src_perms,
next, 1);
+ } else if (sec_heap->mem_type == SECURE_MEMORY_TYPE_QCOM_SYSTEM) {
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i = 0;
+
+ table = &sec_buf->sg_table;
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *page = sg_page(sg);

+ ret = qcom_scm_assign_mem(page_to_phys(page),
+ page_size(page), &src_perms,
+ next, 1);
+ if (ret)
+ break;
+ }
+ }
return ret;
}

@@ -347,9 +368,24 @@ static void secure_heap_qcom_unsecure_memory(struct secure_heap *sec_heap,
next[0].vmid = QCOM_SCM_VMID_HLOS;
next[0].perm = QCOM_SCM_PERM_RWX;

- qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
+ if (sec_heap->mem_type == SECURE_MEMORY_TYPE_CMA) {
+ qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
sec_buf->size, &src_perms,
next, 1);
+ } else if (sec_heap->mem_type == SECURE_MEMORY_TYPE_QCOM_SYSTEM) {
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i = 0;
+
+ table = &sec_buf->sg_table;
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *page = sg_page(sg);
+
+ qcom_scm_assign_mem(page_to_phys(page),
+ page_size(page), &src_perms,
+ next, 1);
+ }
+ }
}

const struct secure_heap_prv_data qcom_cma_sec_mem_data = {
@@ -361,6 +397,117 @@ const struct secure_heap_prv_data qcom_cma_sec_mem_data = {
.unsecure_the_memory = secure_heap_qcom_unsecure_memory,
};

+/* Using system heap allocator */
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
+#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+ | __GFP_NORETRY) & ~__GFP_RECLAIM) \
+ | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
+static struct page *alloc_largest_available(unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ if (size < (PAGE_SIZE << orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_pages(order_flags[i], orders[i]);
+ if (!page)
+ continue;
+ return page;
+ }
+ return NULL;
+}
+
+static int qcom_system_secure_memory_allocate(struct secure_heap *sec_heap,
+ struct secure_buffer *sec_buf)
+{
+ unsigned long size_remaining = sec_buf->size;
+ unsigned int max_order = orders[0];
+ struct sg_table *table;
+ struct scatterlist *sg;
+ struct list_head pages;
+ struct page *page, *tmp_page;
+ int i = 0, ret = -ENOMEM;
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ /*
+ * Avoid trying to allocate memory if the process
+ * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current)) {
+ return -EINTR;
+ }
+
+ page = alloc_largest_available(size_remaining, max_order);
+ if (!page)
+ goto free;
+
+ list_add_tail(&page->lru, &pages);
+ size_remaining -= page_size(page);
+ max_order = compound_order(page);
+ i++;
+ }
+ table = &sec_buf->sg_table;
+ if (sg_alloc_table(table, i, GFP_KERNEL))
+ goto free;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+ sg_set_page(sg, page, page_size(page), 0);
+ sg = sg_next(sg);
+ list_del(&page->lru);
+ }
+ return 0;
+free:
+ list_for_each_entry_safe(page, tmp_page, &pages, lru)
+ __free_pages(page, compound_order(page));
+
+ return ret;
+}
+
+static void qcom_system_secure_memory_free(struct secure_heap *sec_heap,
+ struct secure_buffer *sec_buf)
+{
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i;
+
+ table = &sec_buf->sg_table;
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *page = sg_page(sg);
+
+ __free_pages(page, compound_order(page));
+ }
+ sg_free_table(table);
+}
+
+const struct secure_heap_prv_data qcom_system_pixel_sec_mem_data = {
+ .vmid = QCOM_SCM_VMID_CP_PIXEL,
+ .perm = QCOM_SCM_PERM_RW,
+ .memory_alloc = qcom_system_secure_memory_allocate,
+ .memory_free = qcom_system_secure_memory_free,
+ .secure_the_memory = secure_heap_qcom_secure_memory,
+ .unsecure_the_memory = secure_heap_qcom_unsecure_memory,
+};
+
+const struct secure_heap_prv_data qcom_system_non_pixel_sec_mem_data = {
+ .vmid = QCOM_SCM_VMID_CP_NON_PIXEL,
+ .perm = QCOM_SCM_PERM_RW,
+ .memory_alloc = qcom_system_secure_memory_allocate,
+ .memory_free = qcom_system_secure_memory_free,
+ .secure_the_memory = secure_heap_qcom_secure_memory,
+ .unsecure_the_memory = secure_heap_qcom_unsecure_memory,
+};
+
static int secure_heap_secure_memory_allocate(struct secure_heap *sec_heap,
struct secure_buffer *sec_buf)
{
@@ -585,6 +732,16 @@ static struct secure_heap secure_heaps[] = {
.mem_type = SECURE_MEMORY_TYPE_MTK_CM_CMA,
.data = &mtk_sec_mem_data_cma,
},
+ {
+ .name = "secure_system_pixel",
+ .mem_type = SECURE_MEMORY_TYPE_QCOM_SYSTEM,
+ .data = &qcom_system_pixel_sec_mem_data,
+ },
+ {
+ .name = "secure_system_non_pixel",
+ .mem_type = SECURE_MEMORY_TYPE_QCOM_SYSTEM,
+ .data = &qcom_system_non_pixel_sec_mem_data,
+ },
};

static int __init secure_cma_init(struct reserved_mem *rmem)
--
2.34.1

2023-11-24 07:44:28

by Pavan Kondeti

Subject: Re: [PATCH 1/2] dma-buf: heaps: secure_heap: Add secure ops for CMA heap

Hi Pratyush,

On Wed, Nov 22, 2023 at 07:17:46PM +0530, Pratyush Brahma wrote:
> From: Vijayanand Jitta <[email protected]>
>
> Add secure ops for CMA heap which would use qcom_scm_assign_mem
> to assign the memory to VMID.
>
> Change-Id: I05aff9cb9b7b9668c4352a24bec163b52e38835a
> Signed-off-by: Vijayanand Jitta <[email protected]>
> ---
> drivers/dma-buf/heaps/secure_heap.c | 50 +++++++++++++++++++++++++++--
> 1 file changed, 47 insertions(+), 3 deletions(-)
>

Please add your Signed-off-by line when sending patches. This is a
requirement for accepting patches upstream, and it applies even to
patches you did not author. Given that you are sending the patches,
your Signed-off-by line should be added after Vijay's. For more
details, see
https://docs.kernel.org/process/submitting-patches.html
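
For example, since Vijay is the author and you are posting the
patches, the tags would end up looking something like this:

From: Vijayanand Jitta <[email protected]>
[...]
Signed-off-by: Vijayanand Jitta <[email protected]>
Signed-off-by: Pratyush Brahma <[email protected]>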

Thanks,
Pavan

2024-02-20 18:27:57

by Elliot Berman

Subject: Re: [PATCH 2/2] dma-buf: heaps: secure_heap: Add qcom secure system heap



On 11/22/2023 5:47 AM, Pratyush Brahma wrote:
> From: Vijayanand Jitta <[email protected]>
>
> Add secure system for Pixel and Non pixel video usecases, this
> allocates from system heap and secures using qcom_scm_aasign_mem.
^^^^^^
typo
>
> Change-Id: If0702f85bff651843c6a5c83694043364229e66b
> Signed-off-by: Vijayanand Jitta <[email protected]>

Please get these patches reviewed internally for basic checks before
sending them to the mailing list. You can review go/upstream when on
the Qualcomm corp network.

Pavan mentioned the S-o-B is incorrect. The commit text should also
not have a Change-Id.

Please be sure to send to the linux-arm-msm mailing list as well,
since this affects Qualcomm chipsets.
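
For the basic checks, running checkpatch locally before posting flags
things like the stray Change-Id, and get_maintainer.pl shows which
lists and people to Cc, e.g. (assuming the series was generated with
git format-patch):

./scripts/checkpatch.pl --strict *.patch
./scripts/get_maintainer.pl drivers/dma-buf/heaps/secure_heap.c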
