Add changes to support multimode invocation ioctl requests. This
ioctl call facilitates multiple types of requests from the user,
including CRC checks, performance counters, shared context bank
usage, etc. This series also carries a patch to save and restore
interrupted contexts.
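For reference, a minimal userspace sketch of how such a multimode
request could be issued is shown below. The ioctl request number and
the exact layout of struct fastrpc_ioctl_multimode_invoke are
assumptions here; only the req/invparam/size dispatch pattern is
taken from the driver changes in this series.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical stand-in for struct fastrpc_ioctl_multimode_invoke. */
struct multimode_invoke_sketch {
	uint32_t req;       /* FASTRPC_INVOKE, FASTRPC_INVOKE_ENHANCED, ... */
	uint64_t invparam;  /* user pointer to the request payload */
	uint64_t size;      /* size of the payload pointed to by invparam */
};

static int multimode_invoke(int fd, unsigned long ioctl_nr,
			    uint32_t req, void *payload, uint64_t size)
{
	struct multimode_invoke_sketch inv;

	memset(&inv, 0, sizeof(inv));
	inv.req = req;
	inv.invparam = (uint64_t)(uintptr_t)payload;
	inv.size = size;

	/* ioctl_nr stands in for the real multimode request number. */
	return ioctl(fd, ioctl_nr, &inv);
}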
Ekansh Gupta (5):
misc: fastrpc: Add fastrpc multimode invoke request support
misc: fastrpc: Add CRC support for remote buffers
misc: fastrpc: Capture kernel and DSP performance counters
misc: fastrpc: Add support to save and restore interrupted
misc: fastrpc: Add support to allocate shared context bank
drivers/misc/fastrpc.c | 492 +++++++++++++++++++++++++++++-------
include/uapi/misc/fastrpc.h | 52 ++++
2 files changed, 459 insertions(+), 85 deletions(-)
--
2.17.1
A CRC check for input and output arguments helps ensure data
consistency over a remote call. If the user intends to enable CRC
checking, local CRCs are first calculated at the user end and a CRC
buffer is passed to the DSP to capture remote CRC values. The DSP is
expected to write to the remote CRC buffer, which is then compared at
the user level with the local CRC values.
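As an illustration, a hedged userspace sketch of that comparison is
shown below. The enhanced-invoke structure and the invoke helper are
placeholders (assumptions); FASTRPC_MAX_CRCLIST being 64 u32 entries
matches the driver change in this patch.

#include <stdint.h>

#define SKETCH_MAX_CRCLIST 64	/* mirrors FASTRPC_MAX_CRCLIST */

struct enhanced_invoke_sketch {
	/* struct fastrpc_invoke inv and other fields elided */
	uint64_t crc;	/* user pointer the driver stores in ctx->crc */
};

static int check_remote_crc(const uint32_t *local_crc, int nbufs,
			    int (*do_invoke)(struct enhanced_invoke_sketch *))
{
	uint32_t remote_crc[SKETCH_MAX_CRCLIST] = { 0 };
	struct enhanced_invoke_sketch einv = { 0 };
	int i, err;

	einv.crc = (uint64_t)(uintptr_t)remote_crc;
	err = do_invoke(&einv);	/* driver copies crclist back on return */
	if (err)
		return err;

	for (i = 0; i < nbufs && i < SKETCH_MAX_CRCLIST; i++)
		if (local_crc[i] != remote_crc[i])
			return -1;	/* mismatch: data corrupted in transit */
	return 0;
}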
Signed-off-by: Ekansh Gupta <[email protected]>
---
Changes in v7:
- Rebase the patch to latest kernel version
drivers/misc/fastrpc.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 265e34f53c4e..55f126c779cb 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -611,6 +611,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
/* Released in fastrpc_context_put() */
fastrpc_channel_ctx_get(cctx);
+ ctx->crc = (u32 *)(uintptr_t)invoke->crc;
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
@@ -1067,6 +1068,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
u64 *fdlist;
+ u32 *crclist;
int i, inbufs, outbufs, handles;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
@@ -1074,7 +1076,8 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
- fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
+ fdlist = (u64 *)(pages + inbufs + outbufs + handles);
+ crclist = (u32 *)(fdlist + FASTRPC_MAX_FDLIST);
for (i = inbufs; i < ctx->nbufs; ++i) {
if (!ctx->maps[i]) {
@@ -1099,6 +1102,10 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
fastrpc_map_put(mmap);
}
+ if (ctx->crc && crclist && rpra) {
+ if (copy_to_user((void __user *)ctx->crc, crclist, FASTRPC_MAX_CRCLIST * sizeof(u32)))
+ return -EFAULT;
+ }
return 0;
}
@@ -1719,6 +1726,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
switch (invoke.req) {
case FASTRPC_INVOKE:
+ case FASTRPC_INVOKE_ENHANCED:
/* nscalars is truncated here to max supported value */
if (copy_from_user(&einv, (void __user *)(uintptr_t)invoke.invparam,
invoke.size))
--
2.17.1
For any remote call, the driver sends a message to the DSP using the
rpmsg framework. After the message is sent, the driver waits on a
completion object, which is completed when the DSP response is
received. If a signal is received while waiting, the wait function
returns -ERESTARTSYS. In that case the context should be saved and
restored on the next invocation from the same thread.
Add changes to support saving and restoring of interrupted
fastrpc contexts.
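From the userspace side the behaviour looks roughly like the sketch
below (an illustration, not part of this patch): if the invoke ioctl
is interrupted by a signal and returns EINTR, the retried call from
the same thread lets the driver restore the saved context and resume
waiting instead of issuing a new remote call.

#include <errno.h>
#include <sys/ioctl.h>

static int invoke_with_retry(int fd, unsigned long req, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, req, arg);	/* retried invoke reuses the saved ctx */
	} while (ret < 0 && errno == EINTR);

	return ret;
}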
Signed-off-by: Ekansh Gupta <[email protected]>
---
Changes in v2:
- Fixed missing definition
- Fixes compile time issue
Changes in v5:
- Removed Change-Id tag
Changes in v7:
- Rebase the patch to latest kernel version
drivers/misc/fastrpc.c | 97 +++++++++++++++++++++++++++++++++++-------
1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index cbcac0b3d09b..aa0695f9576e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -333,6 +333,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head interrupted;
struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
@@ -712,6 +713,40 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
return ERR_PTR(ret);
}
+static struct fastrpc_invoke_ctx *fastrpc_context_restore_interrupted(
+ struct fastrpc_user *fl, struct fastrpc_invoke *inv)
+{
+ struct fastrpc_invoke_ctx *ctx = NULL, *ictx = NULL, *n;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(ictx, n, &fl->interrupted, node) {
+ if (ictx->pid == current->pid) {
+ if (inv->sc != ictx->sc || ictx->fl != fl) {
+ dev_err(ictx->fl->sctx->dev,
+ "interrupted sc (0x%x) or fl (%pK) does not match with invoke sc (0x%x) or fl (%pK)\n",
+ ictx->sc, ictx->fl, inv->sc, fl);
+ spin_unlock(&fl->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ ctx = ictx;
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &fl->pending);
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+ return ctx;
+}
+
+static void fastrpc_context_save_interrupted(
+ struct fastrpc_invoke_ctx *ctx)
+{
+ spin_lock(&ctx->fl->lock);
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &ctx->fl->interrupted);
+ spin_unlock(&ctx->fl->lock);
+}
+
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
@@ -1267,6 +1302,14 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
return -EPERM;
}
+ if (!kernel) {
+ ctx = fastrpc_context_restore_interrupted(fl, inv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ if (ctx)
+ goto wait;
+ }
+
ctx = fastrpc_context_alloc(fl, kernel, sc, invoke);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1288,6 +1331,7 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
goto bail;
PERF_END);
+wait:
if (kernel) {
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
err = -ETIMEDOUT;
@@ -1322,6 +1366,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
if (err == -ERESTARTSYS) {
+ if (ctx)
+ fastrpc_context_save_interrupted(ctx);
+
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
@@ -1443,7 +1490,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1576,7 +1623,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1627,6 +1674,25 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
spin_unlock_irqrestore(&cctx->lock, flags);
}
+static void fastrpc_context_list_free(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_ctx *ctx, *n;
+
+ list_for_each_entry_safe(ctx, n, &fl->interrupted, node) {
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
+
+ list_for_each_entry_safe(ctx, n, &fl->pending, node) {
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
+}
+
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
@@ -1640,7 +1706,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1649,7 +1715,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
{
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
struct fastrpc_channel_ctx *cctx = fl->cctx;
- struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
struct fastrpc_buf *buf, *b;
unsigned long flags;
@@ -1663,10 +1728,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
- list_for_each_entry_safe(ctx, n, &fl->pending, node) {
- list_del(&ctx->node);
- fastrpc_context_put(ctx);
- }
+ fastrpc_context_list_free(fl);
list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
@@ -1707,6 +1769,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
+ INIT_LIST_HEAD(&fl->interrupted);
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
@@ -1788,7 +1851,7 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1819,7 +1882,7 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
}
ioctl.inv = inv;
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, false, &ioctl);
kfree(args);
@@ -1901,7 +1964,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
ioctl.inv.handle = FASTRPC_DSP_UTILITIES_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(0, 1, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -2004,7 +2067,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (!err) {
@@ -2102,7 +2165,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2183,7 +2246,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2254,7 +2317,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2575,6 +2638,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
ctx->retval = -EPIPE;
complete(&ctx->work);
}
+ list_for_each_entry(ctx, &user->interrupted, node) {
+ ctx->retval = -EPIPE;
+ complete(&ctx->work);
+ }
spin_unlock(&user->lock);
}
--
2.17.1
A context bank can be marked as shared using the DT property
"qcom,nsessions", which takes the number of sessions to be created
for that context bank. This change provides a control mechanism for
the user to use shared context banks for lightweight processes. A
session is marked as shared at creation time, and if a user requests
a shared context bank, one is allocated during process
initialization.
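A hedged sketch of how userspace could request a shared context bank
before process creation is shown below. The control structure simply
mirrors the driver-internal fastrpc_internal_control added by this
patch (as review feedback later in this thread points out, it is not
exported in the UAPI header), and the ioctl number and request values
are passed in rather than hardcoded, so treat the whole layout as an
assumption.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

struct sketch_ctrl_smmu {
	uint32_t sharedcb;		/* non-zero: use a shared SMMU context bank */
};

struct sketch_internal_control {
	uint32_t req;			/* FASTRPC_CONTROL_SMMU */
	struct sketch_ctrl_smmu smmu;
};

/* Minimal stand-in for struct fastrpc_ioctl_multimode_invoke. */
struct sketch_multimode_invoke {
	uint32_t req;			/* FASTRPC_INVOKE_CONTROL */
	uint64_t invparam;
	uint64_t size;
};

static int request_shared_cb(int fd, unsigned long ioctl_nr,
			     uint32_t invoke_control_req,
			     uint32_t control_smmu_req)
{
	struct sketch_internal_control cp;
	struct sketch_multimode_invoke inv;

	memset(&cp, 0, sizeof(cp));
	cp.req = control_smmu_req;		/* FASTRPC_CONTROL_SMMU */
	cp.smmu.sharedcb = 1;

	memset(&inv, 0, sizeof(inv));
	inv.req = invoke_control_req;		/* FASTRPC_INVOKE_CONTROL */
	inv.invparam = (uint64_t)(uintptr_t)&cp;
	inv.size = sizeof(cp);

	/* Must be issued before process init so session allocation honours it. */
	return ioctl(fd, ioctl_nr, &inv);
}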
Signed-off-by: Ekansh Gupta <[email protected]>
---
Changes in v7:
- Rebase the patch to latest kernel version
drivers/misc/fastrpc.c | 122 ++++++++++++++++++++++++------------
include/uapi/misc/fastrpc.h | 12 ++++
2 files changed, 95 insertions(+), 39 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index aa0695f9576e..8e77beb3a693 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -297,6 +297,7 @@ struct fastrpc_session_ctx {
int sid;
bool used;
bool valid;
+ bool sharedcb;
};
struct fastrpc_channel_ctx {
@@ -344,12 +345,22 @@ struct fastrpc_user {
int tgid;
int pd;
bool is_secure_dev;
+ bool sharedcb;
/* Lock for lists */
spinlock_t lock;
/* lock for allocations */
struct mutex mutex;
};
+struct fastrpc_ctrl_smmu {
+ u32 sharedcb; /* Set to SMMU share context bank */
+};
+
+struct fastrpc_internal_control {
+ u32 req;
+ struct fastrpc_ctrl_smmu smmu;
+};
+
static inline int64_t getnstimediff(struct timespec64 *start)
{
int64_t ns;
@@ -851,6 +862,37 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
.release = fastrpc_release,
};
+static struct fastrpc_session_ctx *fastrpc_session_alloc(
+ struct fastrpc_channel_ctx *cctx, bool sharedcb)
+{
+ struct fastrpc_session_ctx *session = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ for (i = 0; i < cctx->sesscount; i++) {
+ if (!cctx->session[i].used && cctx->session[i].valid &&
+ cctx->session[i].sharedcb == sharedcb) {
+ cctx->session[i].used = true;
+ session = &cctx->session[i];
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cctx->lock, flags);
+
+ return session;
+}
+
+static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
+ struct fastrpc_session_ctx *session)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ session->used = false;
+ spin_unlock_irqrestore(&cctx->lock, flags);
+}
+
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
u64 len, u32 attr, struct fastrpc_map **ppmap)
{
@@ -1448,6 +1490,10 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
goto err_name;
}
+ fl->sctx = fastrpc_session_alloc(fl->cctx, fl->sharedcb);
+ if (!fl->sctx)
+ return -EBUSY;
+
if (!fl->cctx->remote_heap) {
err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
&fl->cctx->remote_heap);
@@ -1570,6 +1616,10 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
+ fl->sctx = fastrpc_session_alloc(fl->cctx, fl->sharedcb);
+ if (!fl->sctx)
+ return -EBUSY;
+
inbuf.pgid = fl->tgid;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
@@ -1644,36 +1694,6 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
return err;
}
-static struct fastrpc_session_ctx *fastrpc_session_alloc(
- struct fastrpc_channel_ctx *cctx)
-{
- struct fastrpc_session_ctx *session = NULL;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&cctx->lock, flags);
- for (i = 0; i < cctx->sesscount; i++) {
- if (!cctx->session[i].used && cctx->session[i].valid) {
- cctx->session[i].used = true;
- session = &cctx->session[i];
- break;
- }
- }
- spin_unlock_irqrestore(&cctx->lock, flags);
-
- return session;
-}
-
-static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
- struct fastrpc_session_ctx *session)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cctx->lock, flags);
- session->used = false;
- spin_unlock_irqrestore(&cctx->lock, flags);
-}
-
static void fastrpc_context_list_free(struct fastrpc_user *fl)
{
struct fastrpc_invoke_ctx *ctx, *n;
@@ -1777,15 +1797,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
- fl->sctx = fastrpc_session_alloc(cctx);
- if (!fl->sctx) {
- dev_err(&cctx->rpdev->dev, "No session available\n");
- mutex_destroy(&fl->mutex);
- kfree(fl);
-
- return -EBUSY;
- }
-
spin_lock_irqsave(&cctx->lock, flags);
list_add_tail(&fl->user, &cctx->users);
spin_unlock_irqrestore(&cctx->lock, flags);
@@ -1844,6 +1855,10 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
struct fastrpc_enhanced_invoke ioctl;
int tgid = fl->tgid;
+ fl->sctx = fastrpc_session_alloc(fl->cctx, fl->sharedcb);
+ if (!fl->sctx)
+ return -EBUSY;
+
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
@@ -1890,11 +1905,33 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_internal_control(struct fastrpc_user *fl,
+ struct fastrpc_internal_control *cp)
+{
+ int err = 0;
+
+ if (!fl)
+ return -EBADF;
+ if (!cp)
+ return -EINVAL;
+
+ switch (cp->req) {
+ case FASTRPC_CONTROL_SMMU:
+ fl->sharedcb = cp->smmu.sharedcb;
+ break;
+ default:
+ err = -EBADRQC;
+ break;
+ }
+ return err;
+}
+
static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_enhanced_invoke einv;
struct fastrpc_invoke_args *args = NULL;
struct fastrpc_ioctl_multimode_invoke invoke;
+ struct fastrpc_internal_control cp = {0};
u32 nscalars;
u64 *perf_kernel;
int err, i;
@@ -1938,6 +1975,12 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
err = fastrpc_internal_invoke(fl, false, &einv);
kfree(args);
break;
+ case FASTRPC_INVOKE_CONTROL:
+ if (copy_from_user(&cp, (void __user *)(uintptr_t)invoke.invparam, sizeof(cp)))
+ return -EFAULT;
+
+ err = fastrpc_internal_control(fl, &cp);
+ break;
default:
err = -ENOTTY;
break;
@@ -2440,6 +2483,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
if (sessions > 0) {
struct fastrpc_session_ctx *dup_sess;
+ sess->sharedcb = true;
for (i = 1; i < sessions; i++) {
if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 074675ee646f..3dfd8e95eda8 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -166,6 +166,18 @@ struct fastrpc_ioctl_capability {
__u32 reserved[4];
};
+enum fastrpc_control_type {
+ FASTRPC_CONTROL_LATENCY = 1,
+ FASTRPC_CONTROL_SMMU = 2,
+ FASTRPC_CONTROL_KALLOC = 3,
+ FASTRPC_CONTROL_WAKELOCK = 4,
+ FASTRPC_CONTROL_PM = 5,
+ FASTRPC_CONTROL_DSPPROCESS_CLEAN = 6,
+ FASTRPC_CONTROL_RPC_POLL = 7,
+ FASTRPC_CONTROL_ASYNC_WAKE = 8,
+ FASTRPC_CONTROL_NOTIF_WAKE = 9,
+};
+
enum fastrpc_perfkeys {
PERF_COUNT = 0,
PERF_RESERVED1 = 1,
--
2.17.1
Add support to capture kernel performance counters for different
kernel-level operations. These counters collect information for the
remote call and copy it to a buffer shared by the user.
Collection of DSP performance counters is also added as part of
this change. The DSP updates the performance information in the
metadata, which is then copied to a buffer passed by the user.
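A hedged userspace sketch of collecting those counters is shown
below: two u64 arrays are handed to the driver through the
perf_kernel and perf_dsp fields of the enhanced invoke. The list
sizes (PERF_KEY_MAX = 10 kernel keys, 12 DSP entries) come from this
patch; the structure layout and the invoke helper are placeholders.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_KERNEL_PERF_LIST 10	/* PERF_KEY_MAX */
#define SKETCH_DSP_PERF_LIST    12	/* FASTRPC_DSP_PERF_LIST */

struct sketch_enhanced_invoke {
	/* struct fastrpc_invoke inv and other fields elided */
	uint64_t perf_kernel;	/* user buffer for kernel counters */
	uint64_t perf_dsp;	/* user buffer for DSP counters */
};

static void collect_perf(struct sketch_enhanced_invoke *einv,
			 int (*do_invoke)(struct sketch_enhanced_invoke *))
{
	uint64_t kperf[SKETCH_KERNEL_PERF_LIST] = { 0 };
	uint64_t dperf[SKETCH_DSP_PERF_LIST] = { 0 };
	int i;

	einv->perf_kernel = (uint64_t)(uintptr_t)kperf;
	einv->perf_dsp = (uint64_t)(uintptr_t)dperf;

	if (do_invoke(einv))
		return;

	/* Indices follow enum fastrpc_perfkeys, e.g. PERF_GETARGS == 5. */
	for (i = 0; i < SKETCH_KERNEL_PERF_LIST; i++)
		printf("kernel perf[%d]: %llu ns\n", i, (unsigned long long)kperf[i]);
	for (i = 0; i < SKETCH_DSP_PERF_LIST; i++)
		printf("dsp perf[%d]: %llu\n", i, (unsigned long long)dperf[i]);
}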
Signed-off-by: Ekansh Gupta <[email protected]>
---
Changes in v2:
- Fixed compile time warnings
Changes in v3:
- Squashed commits to get proper patch series
Changes in v7:
- Rebase the patch to latest kernel version
drivers/misc/fastrpc.c | 141 ++++++++++++++++++++++++++++++++++--
include/uapi/misc/fastrpc.h | 14 ++++
2 files changed, 147 insertions(+), 8 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 55f126c779cb..cbcac0b3d09b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -19,6 +19,7 @@
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>
@@ -33,6 +34,8 @@
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
+#define FASTRPC_KERNEL_PERF_LIST (PERF_KEY_MAX)
+#define FASTRPC_DSP_PERF_LIST 12
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
@@ -105,6 +108,27 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
+#define PERF_END ((void)0)
+
+#define PERF(enb, cnt, ff) \
+ {\
+ struct timespec64 startT = {0};\
+ uint64_t *counter = cnt;\
+ if (enb && counter) {\
+ ktime_get_real_ts64(&startT);\
+ } \
+ ff ;\
+ if (enb && counter) {\
+ *counter += getnstimediff(&startT);\
+ } \
+ }
+
+#define GET_COUNTER(perf_ptr, offset) \
+ (perf_ptr != NULL ?\
+ (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
+ (uint64_t *)(perf_ptr + offset)\
+ : (uint64_t *)NULL) : (uint64_t *)NULL)
+
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
"sdsp", "cdsp"};
struct fastrpc_phy_page {
@@ -228,6 +252,19 @@ struct fastrpc_map {
struct kref refcount;
};
+struct fastrpc_perf {
+ u64 count;
+ u64 flush;
+ u64 map;
+ u64 copy;
+ u64 link;
+ u64 getargs;
+ u64 putargs;
+ u64 invargs;
+ u64 invoke;
+ u64 tid;
+};
+
struct fastrpc_invoke_ctx {
int nscalars;
int nbufs;
@@ -236,6 +273,8 @@ struct fastrpc_invoke_ctx {
int tgid;
u32 sc;
u32 *crc;
+ u64 *perf_kernel;
+ u64 *perf_dsp;
u64 ctxid;
u64 msg_sz;
struct kref refcount;
@@ -250,6 +289,7 @@ struct fastrpc_invoke_ctx {
struct fastrpc_invoke_args *args;
struct fastrpc_buf_overlap *olaps;
struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_perf *perf;
};
struct fastrpc_session_ctx {
@@ -299,6 +339,7 @@ struct fastrpc_user {
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
+ u32 profile;
int tgid;
int pd;
bool is_secure_dev;
@@ -308,6 +349,17 @@ struct fastrpc_user {
struct mutex mutex;
};
+static inline int64_t getnstimediff(struct timespec64 *start)
+{
+ int64_t ns;
+ struct timespec64 ts, b;
+
+ ktime_get_real_ts64(&ts);
+ b = timespec64_sub(ts, *start);
+ ns = timespec64_to_ns(&b);
+ return ns;
+}
+
static void fastrpc_free_map(struct kref *ref)
{
struct fastrpc_map *map;
@@ -493,6 +545,9 @@ static void fastrpc_context_free(struct kref *ref)
if (ctx->buf)
fastrpc_buf_free(ctx->buf);
+ if (ctx->fl->profile)
+ kfree(ctx->perf);
+
spin_lock_irqsave(&cctx->lock, flags);
idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
spin_unlock_irqrestore(&cctx->lock, flags);
@@ -612,6 +667,14 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
fastrpc_channel_ctx_get(cctx);
ctx->crc = (u32 *)(uintptr_t)invoke->crc;
+ ctx->perf_dsp = (u64 *)(uintptr_t)invoke->perf_dsp;
+ ctx->perf_kernel = (u64 *)(uintptr_t)invoke->perf_kernel;
+ if (ctx->fl->profile) {
+ ctx->perf = kzalloc(sizeof(*(ctx->perf)), GFP_KERNEL);
+ if (!ctx->perf)
+ return ERR_PTR(-ENOMEM);
+ ctx->perf->tid = ctx->fl->tgid;
+ }
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
@@ -875,7 +938,8 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
sizeof(struct fastrpc_invoke_buf) +
sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
sizeof(u64) * FASTRPC_MAX_FDLIST +
- sizeof(u32) * FASTRPC_MAX_CRCLIST;
+ sizeof(u32) * FASTRPC_MAX_CRCLIST +
+ sizeof(u32) + sizeof(u64) * FASTRPC_DSP_PERF_LIST;
return size;
}
@@ -942,16 +1006,22 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
int inbufs, i, oix, err = 0;
u64 len, rlen, pkt_size;
u64 pg_start, pg_end;
+ u64 *perf_counter = NULL;
uintptr_t args;
int metalen;
+ if (ctx->fl->profile)
+ perf_counter = (u64 *)ctx->perf + PERF_COUNT;
+
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
metalen = fastrpc_get_meta_size(ctx);
pkt_size = fastrpc_get_payload_size(ctx, metalen);
+ PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
err = fastrpc_create_maps(ctx);
if (err)
return err;
+ PERF_END);
ctx->msg_sz = pkt_size;
@@ -984,6 +1054,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (ctx->maps[i]) {
struct vm_area_struct *vma = NULL;
+ PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
rpra[i].buf.pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
@@ -998,9 +1069,9 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
-
+ PERF_END);
} else {
-
+ PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
if (ctx->olaps[oix].offset == 0) {
rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
args = ALIGN(args, FASTRPC_ALIGN);
@@ -1022,12 +1093,14 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
args = args + mlen;
rlen -= mlen;
+ PERF_END);
}
if (i < inbufs && !ctx->maps[i]) {
void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
void *src = (void *)(uintptr_t)ctx->args[i].ptr;
+ PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
if (!kernel) {
if (copy_from_user(dst, (void __user *)src,
len)) {
@@ -1037,6 +1110,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
} else {
memcpy(dst, src, len);
}
+ PERF_END);
}
}
@@ -1067,9 +1141,9 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
struct fastrpc_map *mmap = NULL;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
- u64 *fdlist;
- u32 *crclist;
- int i, inbufs, outbufs, handles;
+ u64 *fdlist, *perf_dsp_list;
+ u32 *crclist, *poll;
+ int i, inbufs, outbufs, handles, perferr;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
@@ -1078,6 +1152,8 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
pages = fastrpc_phy_page_start(list, ctx->nscalars);
fdlist = (u64 *)(pages + inbufs + outbufs + handles);
crclist = (u32 *)(fdlist + FASTRPC_MAX_FDLIST);
+ poll = (u32 *)(crclist + FASTRPC_MAX_CRCLIST);
+ perf_dsp_list = (u64 *)(poll + 1);
for (i = inbufs; i < ctx->nbufs; ++i) {
if (!ctx->maps[i]) {
@@ -1103,8 +1179,16 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
}
if (ctx->crc && crclist && rpra) {
- if (copy_to_user((void __user *)ctx->crc, crclist, FASTRPC_MAX_CRCLIST * sizeof(u32)))
+ if (copy_to_user((void __user *)ctx->crc, crclist,
+ FASTRPC_MAX_CRCLIST * sizeof(u32))) {
return -EFAULT;
+ }
+ }
+ if (ctx->perf_dsp && perf_dsp_list) {
+ perferr = copy_to_user((void __user *)ctx->perf_dsp,
+ perf_dsp_list, FASTRPC_DSP_PERF_LIST * sizeof(u64));
+ if (perferr)
+ dev_info(fl->sctx->dev, "Warning: failed to copy perf data %d\n", perferr);
}
return 0;
}
@@ -1141,6 +1225,21 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
}
+static void fastrpc_update_invoke_count(u32 handle, u64 *perf_counter,
+ struct timespec64 *invoket)
+{
+ u64 *invcount, *count;
+
+ invcount = GET_COUNTER(perf_counter, PERF_INVOKE);
+ if (invcount)
+ *invcount += getnstimediff(invoket);
+
+ count = GET_COUNTER(perf_counter, PERF_COUNT);
+ if (count)
+ *count += 1;
+}
+
+
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
struct fastrpc_enhanced_invoke *invoke)
{
@@ -1148,7 +1247,12 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
struct fastrpc_buf *buf, *b;
struct fastrpc_invoke *inv = &invoke->inv;
u32 handle, sc;
- int err = 0;
+ u64 *perf_counter = NULL;
+ int err = 0, perferr = 0;
+ struct timespec64 invoket = {0};
+
+ if (fl->profile)
+ ktime_get_real_ts64(&invoket);
if (!fl->sctx)
return -EINVAL;
@@ -1167,16 +1271,22 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (fl->profile)
+ perf_counter = (u64 *)ctx->perf + PERF_COUNT;
+ PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
err = fastrpc_get_args(kernel, ctx);
if (err)
goto bail;
+ PERF_END);
/* make sure that all CPU memory writes are seen by DSP */
dma_wmb();
+ PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
/* Send invoke buffer to remote dsp */
err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
if (err)
goto bail;
+ PERF_END);
if (kernel) {
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
@@ -1190,10 +1300,12 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
/* make sure that all memory writes by DSP are seen by CPU */
dma_rmb();
+ PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
/* populate all the output buffers with results */
err = fastrpc_put_args(ctx, kernel);
if (err)
goto bail;
+ PERF_END);
/* Check the response from remote dsp */
err = ctx->retval;
@@ -1214,6 +1326,15 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
list_del(&buf->node);
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
}
+ } else if (ctx) {
+ if (fl->profile && !err)
+ fastrpc_update_invoke_count(handle, perf_counter, &invoket);
+ if (fl->profile && ctx->perf && ctx->perf_kernel) {
+ perferr = copy_to_user((void __user *)ctx->perf_kernel,
+ ctx->perf, FASTRPC_KERNEL_PERF_LIST * sizeof(u64));
+ if (perferr)
+ dev_info(fl->sctx->dev, "Warning: failed to copy perf data %d\n", perferr);
+ }
}
if (err)
@@ -1712,6 +1833,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_invoke_args *args = NULL;
struct fastrpc_ioctl_multimode_invoke invoke;
u32 nscalars;
+ u64 *perf_kernel;
int err, i;
if (copy_from_user(&invoke, argp, sizeof(invoke)))
@@ -1746,6 +1868,9 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
return -EFAULT;
}
}
+ perf_kernel = (u64 *)(uintptr_t)einv.perf_kernel;
+ if (perf_kernel)
+ fl->profile = true;
einv.inv.args = (__u64)args;
err = fastrpc_internal_invoke(fl, false, &einv);
kfree(args);
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 45c15be1de58..074675ee646f 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -166,4 +166,18 @@ struct fastrpc_ioctl_capability {
__u32 reserved[4];
};
+enum fastrpc_perfkeys {
+ PERF_COUNT = 0,
+ PERF_RESERVED1 = 1,
+ PERF_MAP = 2,
+ PERF_COPY = 3,
+ PERF_LINK = 4,
+ PERF_GETARGS = 5,
+ PERF_PUTARGS = 6,
+ PERF_RESERVED2 = 7,
+ PERF_INVOKE = 8,
+ PERF_RESERVED3 = 9,
+ PERF_KEY_MAX = 10,
+};
+
#endif /* __QCOM_FASTRPC_H__ */
--
2.17.1
On 21/11/2023 09:48, Ekansh Gupta wrote:
> static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
> {
> struct fastrpc_enhanced_invoke einv;
> struct fastrpc_invoke_args *args = NULL;
> struct fastrpc_ioctl_multimode_invoke invoke;
> + struct fastrpc_internal_control cp = {0};
> u32 nscalars;
> u64 *perf_kernel;
> int err, i;
> @@ -1938,6 +1975,12 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
> err = fastrpc_internal_invoke(fl, false, &einv);
> kfree(args);
> break;
> + case FASTRPC_INVOKE_CONTROL:
> + if (copy_from_user(&cp, (void __user *)(uintptr_t)invoke.invparam, sizeof(cp)))
> + return -EFAULT;
wow, this struct is not even exposed in a uapi header, how come
userspace knows about this struct?
Every struct that userspace fills in needs to be part of the UAPI headers.
--srini
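To illustrate the point, a rough sketch of what exporting that
control structure in include/uapi/misc/fastrpc.h could look like is
below; the exported names and layout here are an assumption mirroring
the driver-internal definitions in the patch, not something this
series adds.

#include <linux/types.h>

struct fastrpc_ctrl_smmu {
	__u32 sharedcb;		/* set to 1 to request a shared SMMU context bank */
};

struct fastrpc_ioctl_control {	/* hypothetical UAPI name */
	__u32 req;		/* e.g. FASTRPC_CONTROL_SMMU */
	struct fastrpc_ctrl_smmu smmu;
};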
> +
> + err = fastrpc_internal_control(fl, &cp);
> + break;
> default:
> err = -ENOTTY;
> break;
> @@ -2440,6 +2483,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
> if (sessions > 0) {
> struct fastrpc_session_ctx *dup_sess;
>
> + sess->sharedcb = true;
> for (i = 1; i < sessions; i++) {
On 21/11/2023 09:48, Ekansh Gupta wrote:
> Add support to capture kernel performance counters for different
> kernel level operations. These counters collects the information
> for remote call and copies the information to a buffer shared
> by user.
>
> Collection of DSP performance counters is also added as part of
> this change. DSP updates the performance information in the
> metadata which is then copied to a buffer passed by the user.
>
> Signed-off-by: Ekansh Gupta <[email protected]>
> ---
> Changes in v2:
> - Fixed compile time warnings
> Changes in v3:
> - Squashed commits to get proper patch series
> Changes in v7:
> - Rebase the patch to latest kernel version
>
> drivers/misc/fastrpc.c | 141 ++++++++++++++++++++++++++++++++++--
> include/uapi/misc/fastrpc.h | 14 ++++
> 2 files changed, 147 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> index 55f126c779cb..cbcac0b3d09b 100644
> --- a/drivers/misc/fastrpc.c
> +++ b/drivers/misc/fastrpc.c
> @@ -19,6 +19,7 @@
> #include <linux/rpmsg.h>
> #include <linux/scatterlist.h>
> #include <linux/slab.h>
> +#include <linux/delay.h>
> #include <linux/firmware/qcom/qcom_scm.h>
> #include <uapi/misc/fastrpc.h>
> #include <linux/of_reserved_mem.h>
> @@ -33,6 +34,8 @@
> #define FASTRPC_ALIGN 128
> #define FASTRPC_MAX_FDLIST 16
> #define FASTRPC_MAX_CRCLIST 64
> +#define FASTRPC_KERNEL_PERF_LIST (PERF_KEY_MAX)
> +#define FASTRPC_DSP_PERF_LIST 12
> #define FASTRPC_PHYS(p) ((p) & 0xffffffff)
> #define FASTRPC_CTX_MAX (256)
> #define FASTRPC_INIT_HANDLE 1
> @@ -105,6 +108,27 @@
>
> #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
>
> +#define PERF_END ((void)0)
> +
> +#define PERF(enb, cnt, ff) \
> + {\
> + struct timespec64 startT = {0};\
> + uint64_t *counter = cnt;\
> + if (enb && counter) {\
> + ktime_get_real_ts64(&startT);\
> + } \
> + ff ;\
> + if (enb && counter) {\
> + *counter += getnstimediff(&startT);\
> + } \
> + }
> +
> +#define GET_COUNTER(perf_ptr, offset) \
> + (perf_ptr != NULL ?\
> + (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
> + (uint64_t *)(perf_ptr + offset)\
> + : (uint64_t *)NULL) : (uint64_t *)NULL)
> +
> static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
> "sdsp", "cdsp"};
> struct fastrpc_phy_page {
> @@ -228,6 +252,19 @@ struct fastrpc_map {
> struct kref refcount;
> };
>
> +struct fastrpc_perf {
> + u64 count;
> + u64 flush;
> + u64 map;
> + u64 copy;
> + u64 link;
> + u64 getargs;
> + u64 putargs;
> + u64 invargs;
> + u64 invoke;
> + u64 tid;
> +};
> +
> struct fastrpc_invoke_ctx {
> int nscalars;
> int nbufs;
> @@ -236,6 +273,8 @@ struct fastrpc_invoke_ctx {
> int tgid;
> u32 sc;
> u32 *crc;
> + u64 *perf_kernel;
> + u64 *perf_dsp;
> u64 ctxid;
> u64 msg_sz;
> struct kref refcount;
> @@ -250,6 +289,7 @@ struct fastrpc_invoke_ctx {
> struct fastrpc_invoke_args *args;
> struct fastrpc_buf_overlap *olaps;
> struct fastrpc_channel_ctx *cctx;
> + struct fastrpc_perf *perf;
> };
>
> struct fastrpc_session_ctx {
> @@ -299,6 +339,7 @@ struct fastrpc_user {
> struct fastrpc_session_ctx *sctx;
> struct fastrpc_buf *init_mem;
>
> + u32 profile;
> int tgid;
> int pd;
> bool is_secure_dev;
> @@ -308,6 +349,17 @@ struct fastrpc_user {
> struct mutex mutex;
> };
>
> +static inline int64_t getnstimediff(struct timespec64 *start)
> +{
> + int64_t ns;
> + struct timespec64 ts, b;
> +
> + ktime_get_real_ts64(&ts);
> + b = timespec64_sub(ts, *start);
> + ns = timespec64_to_ns(&b);
> + return ns;
> +}
> +
> static void fastrpc_free_map(struct kref *ref)
> {
> struct fastrpc_map *map;
> @@ -493,6 +545,9 @@ static void fastrpc_context_free(struct kref *ref)
> if (ctx->buf)
> fastrpc_buf_free(ctx->buf);
>
> + if (ctx->fl->profile)
> + kfree(ctx->perf);
> +
> spin_lock_irqsave(&cctx->lock, flags);
> idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
> spin_unlock_irqrestore(&cctx->lock, flags);
> @@ -612,6 +667,14 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
> fastrpc_channel_ctx_get(cctx);
>
> ctx->crc = (u32 *)(uintptr_t)invoke->crc;
> + ctx->perf_dsp = (u64 *)(uintptr_t)invoke->perf_dsp;
> + ctx->perf_kernel = (u64 *)(uintptr_t)invoke->perf_kernel;
> + if (ctx->fl->profile) {
> + ctx->perf = kzalloc(sizeof(*(ctx->perf)), GFP_KERNEL);
> + if (!ctx->perf)
> + return ERR_PTR(-ENOMEM);
> + ctx->perf->tid = ctx->fl->tgid;
> + }
> ctx->sc = sc;
> ctx->retval = -1;
> ctx->pid = current->pid;
> @@ -875,7 +938,8 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
> sizeof(struct fastrpc_invoke_buf) +
> sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
> sizeof(u64) * FASTRPC_MAX_FDLIST +
> - sizeof(u32) * FASTRPC_MAX_CRCLIST;
> + sizeof(u32) * FASTRPC_MAX_CRCLIST +
> + sizeof(u32) + sizeof(u64) * FASTRPC_DSP_PERF_LIST;
>
> return size;
> }
> @@ -942,16 +1006,22 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
> int inbufs, i, oix, err = 0;
> u64 len, rlen, pkt_size;
> u64 pg_start, pg_end;
> + u64 *perf_counter = NULL;
> uintptr_t args;
> int metalen;
>
> + if (ctx->fl->profile)
> + perf_counter = (u64 *)ctx->perf + PERF_COUNT;
> +
> inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
> metalen = fastrpc_get_meta_size(ctx);
> pkt_size = fastrpc_get_payload_size(ctx, metalen);
>
> + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
> err = fastrpc_create_maps(ctx);
> if (err)
> return err;
> + PERF_END);
>
> ctx->msg_sz = pkt_size;
>
> @@ -984,6 +1054,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
> if (ctx->maps[i]) {
> struct vm_area_struct *vma = NULL;
>
> + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
> rpra[i].buf.pv = (u64) ctx->args[i].ptr;
> pages[i].addr = ctx->maps[i]->phys;
>
> @@ -998,9 +1069,9 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
> pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
> PAGE_SHIFT;
> pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
> -
> + PERF_END);
> } else {
> -
> + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
> if (ctx->olaps[oix].offset == 0) {
> rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
> args = ALIGN(args, FASTRPC_ALIGN);
> @@ -1022,12 +1093,14 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
> pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
> args = args + mlen;
> rlen -= mlen;
> + PERF_END);
> }
>
> if (i < inbufs && !ctx->maps[i]) {
> void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
> void *src = (void *)(uintptr_t)ctx->args[i].ptr;
>
> + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
> if (!kernel) {
> if (copy_from_user(dst, (void __user *)src,
> len)) {
> @@ -1037,6 +1110,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
> } else {
> memcpy(dst, src, len);
> }
> + PERF_END);
> }
> }
>
> @@ -1067,9 +1141,9 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> struct fastrpc_map *mmap = NULL;
> struct fastrpc_invoke_buf *list;
> struct fastrpc_phy_page *pages;
> - u64 *fdlist;
> - u32 *crclist;
> - int i, inbufs, outbufs, handles;
> + u64 *fdlist, *perf_dsp_list;
> + u32 *crclist, *poll;
> + int i, inbufs, outbufs, handles, perferr;
>
> inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
> outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
> @@ -1078,6 +1152,8 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> pages = fastrpc_phy_page_start(list, ctx->nscalars);
> fdlist = (u64 *)(pages + inbufs + outbufs + handles);
> crclist = (u32 *)(fdlist + FASTRPC_MAX_FDLIST);
> + poll = (u32 *)(crclist + FASTRPC_MAX_CRCLIST);
> + perf_dsp_list = (u64 *)(poll + 1);
>
> for (i = inbufs; i < ctx->nbufs; ++i) {
> if (!ctx->maps[i]) {
> @@ -1103,8 +1179,16 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> }
>
> if (ctx->crc && crclist && rpra) {
> - if (copy_to_user((void __user *)ctx->crc, crclist, FASTRPC_MAX_CRCLIST * sizeof(u32)))
> + if (copy_to_user((void __user *)ctx->crc, crclist,
> + FASTRPC_MAX_CRCLIST * sizeof(u32))) {
> return -EFAULT;
> + }
> + }
> + if (ctx->perf_dsp && perf_dsp_list) {
> + perferr = copy_to_user((void __user *)ctx->perf_dsp,
> + perf_dsp_list, FASTRPC_DSP_PERF_LIST * sizeof(u64));
> + if (perferr)
> + dev_info(fl->sctx->dev, "Warning: failed to copy perf data %d\n", perferr);
> }
> return 0;
> }
> @@ -1141,6 +1225,21 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
>
> }
>
> +static void fastrpc_update_invoke_count(u32 handle, u64 *perf_counter,
> + struct timespec64 *invoket)
> +{
> + u64 *invcount, *count;
> +
> + invcount = GET_COUNTER(perf_counter, PERF_INVOKE);
> + if (invcount)
> + *invcount += getnstimediff(invoket);
> +
> + count = GET_COUNTER(perf_counter, PERF_COUNT);
> + if (count)
> + *count += 1;
> +}
> +
> +
> static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
> struct fastrpc_enhanced_invoke *invoke)
> {
> @@ -1148,7 +1247,12 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
> struct fastrpc_buf *buf, *b;
> struct fastrpc_invoke *inv = &invoke->inv;
> u32 handle, sc;
> - int err = 0;
> + u64 *perf_counter = NULL;
> + int err = 0, perferr = 0;
> + struct timespec64 invoket = {0};
> +
> + if (fl->profile)
> + ktime_get_real_ts64(&invoket);
>
> if (!fl->sctx)
> return -EINVAL;
> @@ -1167,16 +1271,22 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
> if (IS_ERR(ctx))
> return PTR_ERR(ctx);
>
> + if (fl->profile)
> + perf_counter = (u64 *)ctx->perf + PERF_COUNT;
> + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
> err = fastrpc_get_args(kernel, ctx);
> if (err)
> goto bail;
> + PERF_END);
>
> /* make sure that all CPU memory writes are seen by DSP */
> dma_wmb();
> + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
> /* Send invoke buffer to remote dsp */
> err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
> if (err)
> goto bail;
> + PERF_END);
>
> if (kernel) {
> if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
> @@ -1190,10 +1300,12 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
>
> /* make sure that all memory writes by DSP are seen by CPU */
> dma_rmb();
> + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
> /* populate all the output buffers with results */
> err = fastrpc_put_args(ctx, kernel);
> if (err)
> goto bail;
> + PERF_END);
>
> /* Check the response from remote dsp */
> err = ctx->retval;
> @@ -1214,6 +1326,15 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
> list_del(&buf->node);
> list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
> }
> + } else if (ctx) {
> + if (fl->profile && !err)
> + fastrpc_update_invoke_count(handle, perf_counter, &invoket);
> + if (fl->profile && ctx->perf && ctx->perf_kernel) {
> + perferr = copy_to_user((void __user *)ctx->perf_kernel,
> + ctx->perf, FASTRPC_KERNEL_PERF_LIST * sizeof(u64));
> + if (perferr)
> + dev_info(fl->sctx->dev, "Warning: failed to copy perf data %d\n", perferr);
> + }
> }
>
> if (err)
> @@ -1712,6 +1833,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
> struct fastrpc_invoke_args *args = NULL;
> struct fastrpc_ioctl_multimode_invoke invoke;
> u32 nscalars;
> + u64 *perf_kernel;
> int err, i;
>
> if (copy_from_user(&invoke, argp, sizeof(invoke)))
> @@ -1746,6 +1868,9 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
> return -EFAULT;
> }
> }
> + perf_kernel = (u64 *)(uintptr_t)einv.perf_kernel;
> + if (perf_kernel)
> + fl->profile = true;
> einv.inv.args = (__u64)args;
> err = fastrpc_internal_invoke(fl, false, &einv);
> kfree(args);
> diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
> index 45c15be1de58..074675ee646f 100644
> --- a/include/uapi/misc/fastrpc.h
> +++ b/include/uapi/misc/fastrpc.h
> @@ -166,4 +166,18 @@ struct fastrpc_ioctl_capability {
> __u32 reserved[4];
> };
>
> +enum fastrpc_perfkeys {
> + PERF_COUNT = 0,
> + PERF_RESERVED1 = 1,
Why are there reserved entries in the middle of the range? If you
already know what they are for, please add proper names for them.
> + PERF_MAP = 2,
> + PERF_COPY = 3,
> + PERF_LINK = 4,
> + PERF_GETARGS = 5,
> + PERF_PUTARGS = 6,
> + PERF_RESERVED2 = 7,
> + PERF_INVOKE = 8,
> + PERF_RESERVED3 = 9,
> + PERF_KEY_MAX = 10,
> +};
> +
> #endif /* __QCOM_FASTRPC_H__ */
On 21/11/2023 09:48, Ekansh Gupta wrote:
> CRC check for input and output argument helps in ensuring data
> consistency over a remote call. If user intends to enable CRC check,
> first local user CRC is calculated at user end and a CRC buffer is
> passed to DSP to capture remote CRC values. DSP is expected to
> write to the remote CRC buffer which is then compared at user level
> with the local CRC values.
>
> Signed-off-by: Ekansh Gupta <[email protected]>
> ---
> Changes in v7:
> - Rebase the patch to latest kernel version
>
> drivers/misc/fastrpc.c | 10 +++++++++-
> 1 file changed, 9 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> index 265e34f53c4e..55f126c779cb 100644
> --- a/drivers/misc/fastrpc.c
> +++ b/drivers/misc/fastrpc.c
> @@ -611,6 +611,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
> /* Released in fastrpc_context_put() */
> fastrpc_channel_ctx_get(cctx);
>
> + ctx->crc = (u32 *)(uintptr_t)invoke->crc;
> ctx->sc = sc;
> ctx->retval = -1;
> ctx->pid = current->pid;
> @@ -1067,6 +1068,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> struct fastrpc_invoke_buf *list;
> struct fastrpc_phy_page *pages;
> u64 *fdlist;
> + u32 *crclist;
> int i, inbufs, outbufs, handles;
>
> inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
> @@ -1074,7 +1076,8 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
> list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
> pages = fastrpc_phy_page_start(list, ctx->nscalars);
> - fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
> + fdlist = (u64 *)(pages + inbufs + outbufs + handles);
> + crclist = (u32 *)(fdlist + FASTRPC_MAX_FDLIST);
>
> for (i = inbufs; i < ctx->nbufs; ++i) {
> if (!ctx->maps[i]) {
> @@ -1099,6 +1102,10 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> fastrpc_map_put(mmap);
> }
>
> + if (ctx->crc && crclist && rpra) {
> + if (copy_to_user((void __user *)ctx->crc, crclist, FASTRPC_MAX_CRCLIST * sizeof(u32)))
> + return -EFAULT;
> + }
> return 0;
> }
>
> @@ -1719,6 +1726,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
>
> switch (invoke.req) {
> case FASTRPC_INVOKE:
> + case FASTRPC_INVOKE_ENHANCED:
Isn't this change part of 1/5 patch?
> /* nscalars is truncated here to max supported value */
> if (copy_from_user(&einv, (void __user *)(uintptr_t)invoke.invparam,
> invoke.size))
Hi Ekansh,
kernel test robot noticed the following build warnings:
url: https://github.com/intel-lab-lkp/linux/commits/Ekansh-Gupta/misc-fastrpc-Add-fastrpc-multimode-invoke-request-support/20231121-175147
base: linus/master
patch link: https://lore.kernel.org/r/20231121094844.5764-6-quic_ekangupt%40quicinc.com
patch subject: [PATCH v7 5/5] misc: fastrpc: Add support to allocate shared context bank
config: arm-randconfig-r081-20231123 (https://download.01.org/0day-ci/archive/20231125/[email protected]/config)
compiler: arm-linux-gnueabi-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20231125/[email protected]/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Reported-by: Dan Carpenter <[email protected]>
| Closes: https://lore.kernel.org/r/[email protected]/
New smatch warnings:
drivers/misc/fastrpc.c:1621 fastrpc_init_create_process() warn: missing unwind goto?
vim +1621 drivers/misc/fastrpc.c
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1576 static int fastrpc_init_create_process(struct fastrpc_user *fl,
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1577 char __user *argp)
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1578 {
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1579 struct fastrpc_init_create init;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1580 struct fastrpc_invoke_args *args;
becdceed7669e5 Ekansh Gupta 2023-11-21 1581 struct fastrpc_enhanced_invoke ioctl;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1582 struct fastrpc_phy_page pages[1];
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1583 struct fastrpc_map *map = NULL;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1584 struct fastrpc_buf *imem = NULL;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1585 int memlen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1586 int err;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1587 struct {
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1588 int pgid;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1589 u32 namelen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1590 u32 filelen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1591 u32 pageslen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1592 u32 attrs;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1593 u32 siglen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1594 } inbuf;
7f1f481263c3ce Jeya R 2022-02-14 1595 bool unsigned_module = false;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1596
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1597 args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1598 if (!args)
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1599 return -ENOMEM;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1600
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1601 if (copy_from_user(&init, argp, sizeof(init))) {
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1602 err = -EFAULT;
b49f6d83e290f1 Thierry Escande 2019-03-07 1603 goto err;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1604 }
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1605
7f1f481263c3ce Jeya R 2022-02-14 1606 if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
7f1f481263c3ce Jeya R 2022-02-14 1607 unsigned_module = true;
7f1f481263c3ce Jeya R 2022-02-14 1608
7f1f481263c3ce Jeya R 2022-02-14 1609 if (is_session_rejected(fl, unsigned_module)) {
7f1f481263c3ce Jeya R 2022-02-14 1610 err = -ECONNREFUSED;
7f1f481263c3ce Jeya R 2022-02-14 1611 goto err;
7f1f481263c3ce Jeya R 2022-02-14 1612 }
7f1f481263c3ce Jeya R 2022-02-14 1613
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1614 if (init.filelen > INIT_FILELEN_MAX) {
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1615 err = -EINVAL;
b49f6d83e290f1 Thierry Escande 2019-03-07 1616 goto err;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1617 }
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1618
92fe4bcba19c31 Ekansh Gupta 2023-11-21 1619 fl->sctx = fastrpc_session_alloc(fl->cctx, fl->sharedcb);
92fe4bcba19c31 Ekansh Gupta 2023-11-21 1620 if (!fl->sctx)
92fe4bcba19c31 Ekansh Gupta 2023-11-21 @1621 return -EBUSY;
Should be "ret = -EBUSY; goto err;".
92fe4bcba19c31 Ekansh Gupta 2023-11-21 1622
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1623 inbuf.pgid = fl->tgid;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1624 inbuf.namelen = strlen(current->comm) + 1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1625 inbuf.filelen = init.filelen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1626 inbuf.pageslen = 1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1627 inbuf.attrs = init.attrs;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1628 inbuf.siglen = init.siglen;
84195d206e1fbd Jonathan Marek 2020-09-08 1629 fl->pd = USER_PD;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1630
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1631 if (init.filelen && init.filefd) {
e90d911906196b Vamsi Krishna Gattupalli 2022-02-14 1632 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1633 if (err)
b49f6d83e290f1 Thierry Escande 2019-03-07 1634 goto err;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1635 }
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1636
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1637 memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1638 1024 * 1024);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1639 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1640 &imem);
b49f6d83e290f1 Thierry Escande 2019-03-07 1641 if (err)
b49f6d83e290f1 Thierry Escande 2019-03-07 1642 goto err_alloc;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1643
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1644 fl->init_mem = imem;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1645 args[0].ptr = (u64)(uintptr_t)&inbuf;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1646 args[0].length = sizeof(inbuf);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1647 args[0].fd = -1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1648
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1649 args[1].ptr = (u64)(uintptr_t)current->comm;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1650 args[1].length = inbuf.namelen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1651 args[1].fd = -1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1652
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1653 args[2].ptr = (u64) init.file;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1654 args[2].length = inbuf.filelen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1655 args[2].fd = init.filefd;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1656
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1657 pages[0].addr = imem->phys;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1658 pages[0].size = imem->size;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1659
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1660 args[3].ptr = (u64)(uintptr_t) pages;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1661 args[3].length = 1 * sizeof(*pages);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1662 args[3].fd = -1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1663
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1664 args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1665 args[4].length = sizeof(inbuf.attrs);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1666 args[4].fd = -1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1667
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1668 args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1669 args[5].length = sizeof(inbuf.siglen);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1670 args[5].fd = -1;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1671
becdceed7669e5 Ekansh Gupta 2023-11-21 1672 ioctl.inv.handle = FASTRPC_INIT_HANDLE;
becdceed7669e5 Ekansh Gupta 2023-11-21 1673 ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1674 if (init.attrs)
becdceed7669e5 Ekansh Gupta 2023-11-21 1675 ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
e27748f5c08306 Ekansh Gupta 2023-11-21 1676 ioctl.inv.args = (u64)args;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1677
becdceed7669e5 Ekansh Gupta 2023-11-21 1678 err = fastrpc_internal_invoke(fl, true, &ioctl);
b49f6d83e290f1 Thierry Escande 2019-03-07 1679 if (err)
b49f6d83e290f1 Thierry Escande 2019-03-07 1680 goto err_invoke;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1681
b49f6d83e290f1 Thierry Escande 2019-03-07 1682 kfree(args);
b49f6d83e290f1 Thierry Escande 2019-03-07 1683
b49f6d83e290f1 Thierry Escande 2019-03-07 1684 return 0;
b49f6d83e290f1 Thierry Escande 2019-03-07 1685
b49f6d83e290f1 Thierry Escande 2019-03-07 1686 err_invoke:
b49f6d83e290f1 Thierry Escande 2019-03-07 1687 fl->init_mem = NULL;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1688 fastrpc_buf_free(imem);
b49f6d83e290f1 Thierry Escande 2019-03-07 1689 err_alloc:
b49f6d83e290f1 Thierry Escande 2019-03-07 1690 fastrpc_map_put(map);
b49f6d83e290f1 Thierry Escande 2019-03-07 1691 err:
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1692 kfree(args);
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1693
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1694 return err;
d73f71c7c6ee15 Srinivas Kandagatla 2019-02-08 1695 }
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki