2023-02-13 09:58:47

by Etienne Carriere

Subject: [PATCH v4 1/2] dt-bindings: optee driver interrupt can be a per-cpu interrupt

Make explicit in the optee firmware device tree bindings that the interrupt
used by the optee driver for async notifications can be a peripheral
interrupt or a per-cpu interrupt.

Signed-off-by: Etienne Carriere <[email protected]>
---
Changes since v3:
- Patch added in this v4 to address review comments.
---
.../devicetree/bindings/arm/firmware/linaro,optee-tz.yaml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
index d4dc0749f9fd..5d033570b57b 100644
--- a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
+++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
@@ -28,7 +28,8 @@ properties:
maxItems: 1
description: |
This interrupt which is used to signal an event by the secure world
- software is expected to be edge-triggered.
+ software is expected to be either a per-cpu interrupt or an
+ edge-triggered peripheral interrupt.

method:
enum: [smc, hvc]
--
2.25.1



2023-02-13 09:58:51

by Etienne Carriere

Subject: [PATCH v4 2/2] optee: add per cpu asynchronous notification

Implement use of a per-cpu interrupt for optee asynchronous notification.

The existing optee async notif implementation lets the OP-TEE world raise
an interrupt upon which the Linux optee driver queries pending events,
either events bound to tasks waiting in the Linux world or threaded bottom
half work to be invoked in the TEE world. This change allows the signaling
interrupt to be a per-cpu interrupt, as with Arm GIC PPIs.

Using a PPI instead of an SPI is useful when no GIC lines are provisioned
in the chip design for OP-TEE async notifications. Instead of consuming an
otherwise unused GIC SPI on a specific platform, optee can use a common
GIC PPI across platforms.

Cc: Jens Wiklander <[email protected]>
Cc: Sumit Garg <[email protected]>
Cc: Marc Zyngier <[email protected]>

Co-developed-by: Alexandre Torgue <[email protected]>
Signed-off-by: Alexandre Torgue <[email protected]>
Signed-off-by: Etienne Carriere <[email protected]>
---
Changes since v3:
- Fixed typo in commit message.
- Added a few words to the commit message about why this change is made.
- Appended a 2nd commit to the series for the requested DT bindings update.

Changes since v2:
- The irq and the per-cpu irq no longer share the same primary handler
function but use a common irq_handler() helper function.
- Removed useless spinlocks.
- Wrapped lines > 80 char.

Changes since v1:
- Fixed missing __percpu attribute reported by kernel test robot.
- Rephrased commit message and added Cc tags.
---
drivers/tee/optee/optee_private.h | 22 +++++++
drivers/tee/optee/smc_abi.c | 97 +++++++++++++++++++++++++++++--
2 files changed, 115 insertions(+), 4 deletions(-)

diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 04ae58892608..e5bd3548691f 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -94,11 +94,33 @@ struct optee_supp {
struct completion reqs_c;
};

+/*
+ * struct optee_pcpu - per cpu notif private struct passed to work functions
+ * @optee optee device reference
+ */
+struct optee_pcpu {
+ struct optee *optee;
+};
+
+/*
+ * struct optee_smc - optee smc communication struct
+ * @invoke_fn handler function to invoke secure monitor
+ * @memremaped_shm virtual address of memory in shared memory pool
+ * @sec_caps: secure world capabilities defined by
+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @notif_irq interrupt used as async notification by OP-TEE or 0
+ * @optee_pcpu per_cpu optee instance for per cpu work or NULL
+ * @notif_pcpu_wq workqueue for per cpu asynchronous notification or NULL
+ * @notif_pcpu_work work for per cpu asynchronous notification
+ */
struct optee_smc {
optee_invoke_fn *invoke_fn;
void *memremaped_shm;
u32 sec_caps;
unsigned int notif_irq;
+ struct optee_pcpu __percpu *optee_pcpu;
+ struct workqueue_struct *notif_pcpu_wq;
+ struct work_struct notif_pcpu_work;
};

/**
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index a1c1fa1a9c28..eff35f66399e 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -991,9 +991,8 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
return res.a1;
}

-static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+static irqreturn_t irq_handler(struct optee *optee)
{
- struct optee *optee = dev_id;
bool do_bottom_half = false;
bool value_valid;
bool value_pending;
@@ -1016,6 +1015,11 @@ static irqreturn_t notif_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}

+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ return irq_handler((struct optee *)dev_id);
+}
+
static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
@@ -1025,7 +1029,7 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
return IRQ_HANDLED;
}

-static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+static int init_irq(struct optee *optee, u_int irq)
{
int rc;

@@ -1040,12 +1044,97 @@ static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
return 0;
}

+static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
+{
+ struct optee_pcpu __percpu *pcpu = (struct optee_pcpu *)dev_id;
+ struct optee *optee = pcpu->optee;
+
+ if (irq_handler(optee) == IRQ_WAKE_THREAD)
+ queue_work(optee->smc.notif_pcpu_wq,
+ &optee->smc.notif_pcpu_work);
+
+ return IRQ_HANDLED;
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+ struct optee_smc *optee_smc = container_of(work, struct optee_smc,
+ notif_pcpu_work);
+ struct optee *optee = container_of(optee_smc, struct optee, smc);
+
+ optee_smc_do_bottom_half(optee->ctx);
+}
+
+static int init_pcpu_irq(struct optee *optee, u_int irq)
+{
+ struct optee_pcpu __percpu *optee_pcpu;
+ int cpu;
+ int rc;
+
+ optee_pcpu = alloc_percpu(struct optee_pcpu);
+ if (!optee_pcpu)
+ return -ENOMEM;
+
+ for_each_present_cpu(cpu) {
+ struct optee_pcpu __percpu *p = per_cpu_ptr(optee_pcpu, cpu);
+
+ p->optee = optee;
+ }
+
+ rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
+ "optee_pcpu_notification", optee_pcpu);
+ if (rc)
+ goto err_free_pcpu;
+
+ enable_percpu_irq(irq, 0);
+
+ INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
+ optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
+ if (!optee->smc.notif_pcpu_wq) {
+ rc = -EINVAL;
+ goto err_free_pcpu_irq;
+ }
+
+ optee->smc.optee_pcpu = optee_pcpu;
+ optee->smc.notif_irq = irq;
+
+ return 0;
+
+err_free_pcpu_irq:
+ disable_percpu_irq(irq);
+ free_percpu_irq(irq, optee_pcpu);
+err_free_pcpu:
+ free_percpu(optee_pcpu);
+
+ return rc;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ if (irq_is_percpu_devid(irq))
+ return init_pcpu_irq(optee, irq);
+ else
+ return init_irq(optee, irq);
+}
+
+static void uninit_pcpu_irq(struct optee *optee)
+{
+ disable_percpu_irq(optee->smc.notif_irq);
+
+ free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
+ free_percpu(optee->smc.optee_pcpu);
+}
+
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
optee_smc_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
- free_irq(optee->smc.notif_irq, optee);
+ if (irq_is_percpu_devid(optee->smc.notif_irq))
+ uninit_pcpu_irq(optee);
+ else
+ free_irq(optee->smc.notif_irq, optee);
+
irq_dispose_mapping(optee->smc.notif_irq);
}
}
--
2.25.1


2023-02-14 11:12:17

by Sumit Garg

Subject: Re: [PATCH v4 2/2] optee: add per cpu asynchronous notification

On Mon, 13 Feb 2023 at 15:28, Etienne Carriere
<[email protected]> wrote:
>
> Implements use of per-cpu irq for optee asynchronous notification.
>
> Existing optee async notif implementation allows OP-TEE world to
> raise an interrupt for which Linux optee driver will query pending
> events bound to waiting tasks in Linux world or threaded bottom half
> tasks to be invoked in TEE world. This change allows the signaling
> interrupt to be a per-cpu interrupt as with Arm GIC PPIs.
>
> Using a PPI instead of an SPI is useful when no GIC lines are provisioned
> in the chip design for OP-TEE async notifications. Instead of using an
> unused GIC SPI for a specific platform, optee can use a common GIC PPI
> across platforms.
>
> Cc: Jens Wiklander <[email protected]>
> Cc: Sumit Garg <[email protected]>
> Cc: Marc Zyngier <[email protected]>
>
> Co-developed-by: Alexandre Torgue <[email protected]>
> Signed-off-by: Alexandre Torgue <[email protected]>
> Signed-off-by: Etienne Carriere <[email protected]>
> ---
> Changes since v3:
> - Fixed typo in commit message.
> - Added few words in commit message about why we do this change.
> - Appended a 2nd commit to the series for request DT bindings update.
>
> Changes since v2:
> - Irq and per-cpu irq no more share the primary same handler function
> but have a common irq_handler() helper function.
> - Removed useless spinlocks.
> - Wrapped lines > 80 char.
>
> Changes since v1:
> - Fixed missing __percpu attribute reported by kernel test robot.
> - Rephrased commit message and added Cc tags.
> ---
> drivers/tee/optee/optee_private.h | 22 +++++++
> drivers/tee/optee/smc_abi.c | 97 +++++++++++++++++++++++++++++--
> 2 files changed, 115 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
> index 04ae58892608..e5bd3548691f 100644
> --- a/drivers/tee/optee/optee_private.h
> +++ b/drivers/tee/optee/optee_private.h
> @@ -94,11 +94,33 @@ struct optee_supp {
> struct completion reqs_c;
> };
>
> +/*
> + * struct optee_pcpu - per cpu notif private struct passed to work functions
> + * @optee optee device reference
> + */
> +struct optee_pcpu {
> + struct optee *optee;
> +};
> +
> +/*
> + * struct optee_smc - optee smc communication struct
> + * @invoke_fn handler function to invoke secure monitor
> + * @memremaped_shm virtual address of memory in shared memory pool
> + * @sec_caps: secure world capabilities defined by
> + * OPTEE_SMC_SEC_CAP_* in optee_smc.h
> + * @notif_irq interrupt used as async notification by OP-TEE or 0
> + * @optee_pcpu per_cpu optee instance for per cpu work or NULL
> + * @notif_pcpu_wq workqueue for per cpu aynchronous notification or NULL
> + * @notif_pcpu_work work for per cpu asynchronous notification
> + */
> struct optee_smc {
> optee_invoke_fn *invoke_fn;
> void *memremaped_shm;
> u32 sec_caps;
> unsigned int notif_irq;
> + struct optee_pcpu __percpu *optee_pcpu;
> + struct workqueue_struct *notif_pcpu_wq;
> + struct work_struct notif_pcpu_work;
> };
>
> /**
> diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
> index a1c1fa1a9c28..eff35f66399e 100644
> --- a/drivers/tee/optee/smc_abi.c
> +++ b/drivers/tee/optee/smc_abi.c
> @@ -991,9 +991,8 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
> return res.a1;
> }
>
> -static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> +static irqreturn_t irq_handler(struct optee *optee)
> {
> - struct optee *optee = dev_id;
> bool do_bottom_half = false;
> bool value_valid;
> bool value_pending;
> @@ -1016,6 +1015,11 @@ static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> return IRQ_HANDLED;
> }
>
> +static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> +{
> + return irq_handler((struct optee *)dev_id);
> +}
> +
> static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
> {
> struct optee *optee = dev_id;
> @@ -1025,7 +1029,7 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
> return IRQ_HANDLED;
> }
>
> -static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> +static int init_irq(struct optee *optee, u_int irq)
> {
> int rc;
>
> @@ -1040,12 +1044,97 @@ static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> return 0;
> }
>
> +static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
> +{
> + struct optee_pcpu __percpu *pcpu = (struct optee_pcpu *)dev_id;
> + struct optee *optee = pcpu->optee;
> +
> + if (irq_handler(optee) == IRQ_WAKE_THREAD)
> + queue_work(optee->smc.notif_pcpu_wq,
> + &optee->smc.notif_pcpu_work);
> +
> + return IRQ_HANDLED;
> +}
> +
> +static void notif_pcpu_irq_work_fn(struct work_struct *work)
> +{
> + struct optee_smc *optee_smc = container_of(work, struct optee_smc,
> + notif_pcpu_work);
> + struct optee *optee = container_of(optee_smc, struct optee, smc);
> +
> + optee_smc_do_bottom_half(optee->ctx);
> +}
> +
> +static int init_pcpu_irq(struct optee *optee, u_int irq)
> +{
> + struct optee_pcpu __percpu *optee_pcpu;
> + int cpu;
> + int rc;
> +
> + optee_pcpu = alloc_percpu(struct optee_pcpu);
> + if (!optee_pcpu)
> + return -ENOMEM;
> +
> + for_each_present_cpu(cpu) {
> + struct optee_pcpu __percpu *p = per_cpu_ptr(optee_pcpu, cpu);
> +
> + p->optee = optee;
> + }
> +
> + rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
> + "optee_pcpu_notification", optee_pcpu);
> + if (rc)
> + goto err_free_pcpu;
> +
> + enable_percpu_irq(irq, 0);

AFAICS, this percpu irq is only enabled for the CPU which is doing the
OP-TEE driver probe. How would it be enabled for the other CPUs? Or for
hot-plugged CPUs?
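
For illustration only: enable_percpu_irq() acts on the calling CPU alone,
so covering the CPUs that are already online would need something like the
sketch below (optee_enable_pcpu_irq() is a made-up helper name). Even then,
CPUs brought online later would still miss the enable.

static void optee_enable_pcpu_irq(void *data)
{
        /* Runs on each online CPU via on_each_cpu() */
        enable_percpu_irq(*(unsigned int *)data, 0);
}

        /* In init_pcpu_irq(), once request_percpu_irq() has succeeded */
        on_each_cpu(optee_enable_pcpu_irq, &irq, 1);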

> +
> + INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
> + optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
> + if (!optee->smc.notif_pcpu_wq) {
> + rc = -EINVAL;
> + goto err_free_pcpu_irq;
> + }
> +
> + optee->smc.optee_pcpu = optee_pcpu;
> + optee->smc.notif_irq = irq;
> +
> + return 0;
> +
> +err_free_pcpu_irq:
> + disable_percpu_irq(irq);
> + free_percpu_irq(irq, optee_pcpu);
> +err_free_pcpu:
> + free_percpu(optee_pcpu);
> +
> + return rc;
> +}
> +
> +static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> +{
> + if (irq_is_percpu_devid(irq))
> + return init_pcpu_irq(optee, irq);
> + else
> + return init_irq(optee, irq);
> +}
> +
> +static void uninit_pcpu_irq(struct optee *optee)
> +{
> + disable_percpu_irq(optee->smc.notif_irq);

OP-TEE remove may be called on a different CPU than the one which did
the OP-TEE probe. So we would need to disable the percpu irq on every CPU,
which I am not sure can be done in a clean manner here. AFAICS,
cpuhp_setup_state() and friends are the commonly used APIs to
enable/disable a percpu irq on each CPU.
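
As a rough sketch of that approach (the callback names, the hypothetical
optee_notif_pcpu_irq variable and the use of CPUHP_AP_ONLINE_DYN are only
assumptions for illustration, not a proposal of final code):

static unsigned int optee_notif_pcpu_irq;

static int optee_cpuhp_enable_pcpu_irq(unsigned int cpu)
{
        /* Runs on each CPU when it comes online */
        enable_percpu_irq(optee_notif_pcpu_irq, 0);

        return 0;
}

static int optee_cpuhp_disable_pcpu_irq(unsigned int cpu)
{
        /* Runs on each CPU before it goes offline */
        disable_percpu_irq(optee_notif_pcpu_irq);

        return 0;
}

        /* In init_pcpu_irq(), instead of a single enable_percpu_irq() call */
        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee/pcpu-irq:online",
                               optee_cpuhp_enable_pcpu_irq,
                               optee_cpuhp_disable_pcpu_irq);

With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() also invokes the startup
callback on all CPUs that are already online, so the probe-time enabling
comes for free and hot-plugged CPUs are covered as well.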

-Sumit

> +
> + free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
> + free_percpu(optee->smc.optee_pcpu);
> +}
> +
> static void optee_smc_notif_uninit_irq(struct optee *optee)
> {
> if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
> optee_smc_stop_async_notif(optee->ctx);
> if (optee->smc.notif_irq) {
> - free_irq(optee->smc.notif_irq, optee);
> + if (irq_is_percpu_devid(optee->smc.notif_irq))
> + uninit_pcpu_irq(optee);
> + else
> + free_irq(optee->smc.notif_irq, optee);
> +
> irq_dispose_mapping(optee->smc.notif_irq);
> }
> }
> --
> 2.25.1
>

2023-02-15 12:42:01

by Etienne Carriere

Subject: Re: [PATCH v4 2/2] optee: add per cpu asynchronous notification

Hello Sumit,

On Tue, 14 Feb 2023 at 12:11, Sumit Garg <[email protected]> wrote:
>
> On Mon, 13 Feb 2023 at 15:28, Etienne Carriere
> <[email protected]> wrote:
> >
> > Implements use of per-cpu irq for optee asynchronous notification.
> >
> > Existing optee async notif implementation allows OP-TEE world to
> > raise an interrupt for which Linux optee driver will query pending
> > events bound to waiting tasks in Linux world or threaded bottom half
> > tasks to be invoked in TEE world. This change allows the signaling
> > interrupt to be a per-cpu interrupt as with Arm GIC PPIs.
> >
> > Using a PPI instead of an SPI is useful when no GIC lines are provisioned
> > in the chip design for OP-TEE async notifications. Instead of using an
> > unused GIC SPI for a specific platform, optee can use a common GIC PPI
> > across platforms.
> >
> > Cc: Jens Wiklander <[email protected]>
> > Cc: Sumit Garg <[email protected]>
> > Cc: Marc Zyngier <[email protected]>
> >
> > Co-developed-by: Alexandre Torgue <[email protected]>
> > Signed-off-by: Alexandre Torgue <[email protected]>
> > Signed-off-by: Etienne Carriere <[email protected]>
> > ---
> > Changes since v3:
> > - Fixed typo in commit message.
> > - Added few words in commit message about why we do this change.
> > - Appended a 2nd commit to the series for request DT bindings update.
> >
> > Changes since v2:
> > - Irq and per-cpu irq no more share the primary same handler function
> > but have a common irq_handler() helper function.
> > - Removed useless spinlocks.
> > - Wrapped lines > 80 char.
> >
> > Changes since v1:
> > - Fixed missing __percpu attribute reported by kernel test robot.
> > - Rephrased commit message and added Cc tags.
> > ---
> > drivers/tee/optee/optee_private.h | 22 +++++++
> > drivers/tee/optee/smc_abi.c | 97 +++++++++++++++++++++++++++++--
> > 2 files changed, 115 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
> > index 04ae58892608..e5bd3548691f 100644
> > --- a/drivers/tee/optee/optee_private.h
> > +++ b/drivers/tee/optee/optee_private.h
> > @@ -94,11 +94,33 @@ struct optee_supp {
> > struct completion reqs_c;
> > };
> >
> > +/*
> > + * struct optee_pcpu - per cpu notif private struct passed to work functions
> > + * @optee optee device reference
> > + */
> > +struct optee_pcpu {
> > + struct optee *optee;
> > +};
> > +
> > +/*
> > + * struct optee_smc - optee smc communication struct
> > + * @invoke_fn handler function to invoke secure monitor
> > + * @memremaped_shm virtual address of memory in shared memory pool
> > + * @sec_caps: secure world capabilities defined by
> > + * OPTEE_SMC_SEC_CAP_* in optee_smc.h
> > + * @notif_irq interrupt used as async notification by OP-TEE or 0
> > + * @optee_pcpu per_cpu optee instance for per cpu work or NULL
> > + * @notif_pcpu_wq workqueue for per cpu aynchronous notification or NULL
> > + * @notif_pcpu_work work for per cpu asynchronous notification
> > + */
> > struct optee_smc {
> > optee_invoke_fn *invoke_fn;
> > void *memremaped_shm;
> > u32 sec_caps;
> > unsigned int notif_irq;
> > + struct optee_pcpu __percpu *optee_pcpu;
> > + struct workqueue_struct *notif_pcpu_wq;
> > + struct work_struct notif_pcpu_work;
> > };
> >
> > /**
> > diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
> > index a1c1fa1a9c28..eff35f66399e 100644
> > --- a/drivers/tee/optee/smc_abi.c
> > +++ b/drivers/tee/optee/smc_abi.c
> > @@ -991,9 +991,8 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
> > return res.a1;
> > }
> >
> > -static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> > +static irqreturn_t irq_handler(struct optee *optee)
> > {
> > - struct optee *optee = dev_id;
> > bool do_bottom_half = false;
> > bool value_valid;
> > bool value_pending;
> > @@ -1016,6 +1015,11 @@ static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> > return IRQ_HANDLED;
> > }
> >
> > +static irqreturn_t notif_irq_handler(int irq, void *dev_id)
> > +{
> > + return irq_handler((struct optee *)dev_id);
> > +}
> > +
> > static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
> > {
> > struct optee *optee = dev_id;
> > @@ -1025,7 +1029,7 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
> > return IRQ_HANDLED;
> > }
> >
> > -static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> > +static int init_irq(struct optee *optee, u_int irq)
> > {
> > int rc;
> >
> > @@ -1040,12 +1044,97 @@ static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> > return 0;
> > }
> >
> > +static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
> > +{
> > + struct optee_pcpu __percpu *pcpu = (struct optee_pcpu *)dev_id;
> > + struct optee *optee = pcpu->optee;
> > +
> > + if (irq_handler(optee) == IRQ_WAKE_THREAD)
> > + queue_work(optee->smc.notif_pcpu_wq,
> > + &optee->smc.notif_pcpu_work);
> > +
> > + return IRQ_HANDLED;
> > +}
> > +
> > +static void notif_pcpu_irq_work_fn(struct work_struct *work)
> > +{
> > + struct optee_smc *optee_smc = container_of(work, struct optee_smc,
> > + notif_pcpu_work);
> > + struct optee *optee = container_of(optee_smc, struct optee, smc);
> > +
> > + optee_smc_do_bottom_half(optee->ctx);
> > +}
> > +
> > +static int init_pcpu_irq(struct optee *optee, u_int irq)
> > +{
> > + struct optee_pcpu __percpu *optee_pcpu;
> > + int cpu;
> > + int rc;
> > +
> > + optee_pcpu = alloc_percpu(struct optee_pcpu);
> > + if (!optee_pcpu)
> > + return -ENOMEM;
> > +
> > + for_each_present_cpu(cpu) {
> > + struct optee_pcpu __percpu *p = per_cpu_ptr(optee_pcpu, cpu);
> > +
> > + p->optee = optee;
> > + }
> > +
> > + rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
> > + "optee_pcpu_notification", optee_pcpu);
> > + if (rc)
> > + goto err_free_pcpu;
> > +
> > + enable_percpu_irq(irq, 0);
>
> AFAICS, this percpu irq is only enabled for CPU which is doing OP-TEE
> driver probe. How would it be enabled for other CPUs? Hot plugged
> CPUs?
>
> > +
> > + INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
> > + optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
> > + if (!optee->smc.notif_pcpu_wq) {
> > + rc = -EINVAL;
> > + goto err_free_pcpu_irq;
> > + }
> > +
> > + optee->smc.optee_pcpu = optee_pcpu;
> > + optee->smc.notif_irq = irq;
> > +
> > + return 0;
> > +
> > +err_free_pcpu_irq:
> > + disable_percpu_irq(irq);
> > + free_percpu_irq(irq, optee_pcpu);
> > +err_free_pcpu:
> > + free_percpu(optee_pcpu);
> > +
> > + return rc;
> > +}
> > +
> > +static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
> > +{
> > + if (irq_is_percpu_devid(irq))
> > + return init_pcpu_irq(optee, irq);
> > + else
> > + return init_irq(optee, irq);
> > +}
> > +
> > +static void uninit_pcpu_irq(struct optee *optee)
> > +{
> > + disable_percpu_irq(optee->smc.notif_irq);
>
> OP-TEE remove may be called on a different CPU than the one which did
> the OP-TEE probe. So we need to disable percpu irq for every CPU which
> I am not sure can be done in a clean manner here. AFAICS,
> cpuhp_setup_state() and friends are the commonly used APIs to
> enable/disable percpu irq.

I see your point, thanks.
Using cpuhp_setup_state() would require the optee driver to be
built-in, not loadable, as far as I can see there is no way to
unregister cpuhp callbacks. A config switch could ensure this.

BR,
Etienne


>
> -Sumit
>
> > +
> > + free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
> > + free_percpu(optee->smc.optee_pcpu);
> > +}
> > +
> > static void optee_smc_notif_uninit_irq(struct optee *optee)
> > {
> > if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
> > optee_smc_stop_async_notif(optee->ctx);
> > if (optee->smc.notif_irq) {
> > - free_irq(optee->smc.notif_irq, optee);
> > + if (irq_is_percpu_devid(optee->smc.notif_irq))
> > + uninit_pcpu_irq(optee);
> > + else
> > + free_irq(optee->smc.notif_irq, optee);
> > +
> > irq_dispose_mapping(optee->smc.notif_irq);
> > }
> > }
> > --
> > 2.25.1
> >