2024-03-25 14:19:58

by Jens Wiklander

Subject: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

Add support for running the driver as a guest under a hypervisor. The
main differences are the introduction of a notification pending
interrupt and that FFA_NOTIFICATION_BITMAP_CREATE doesn't need to be
called.

The guest may need to use a notification pending interrupt instead of or
in addition to the schedule receiver interrupt.
A query of FFA_FEAT_NOTIFICATION_PENDING_INT returns the interrupt ID
the hypervisor has chosen for notifying its guest of pending
notifications.

Signed-off-by: Jens Wiklander <[email protected]>
---
Hi,

I've tested this with Xen as the hypervisor (Xen patches to be posted)
and the NPI handler delivers notifications as expected using
FFA_NOTIFICATION_GET. I've also tested without a hypervisor to see that
the SRI handler still works as expected.

Thanks,
Jens

v1->v2:
* Adds a Notification Pending Interrupt handler to retrieve notifications
using FFA_NOTIFICATION_GET for the interrupted CPU.
* The original Schedule Receiver Interrupt handler is initialized in parallel
depending on whether FFA_FEAT_SCHEDULE_RECEIVER_INT is available.

drivers/firmware/arm_ffa/driver.c | 117 +++++++++++++++++++++---------
1 file changed, 82 insertions(+), 35 deletions(-)

diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e9401..9732f0066c70 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -101,11 +101,12 @@ struct ffa_drv_info {
bool bitmap_created;
bool notif_enabled;
unsigned int sched_recv_irq;
+ unsigned int notif_pend_irq;
unsigned int cpuhp_state;
struct ffa_pcpu_irq __percpu *irq_pcpu;
struct workqueue_struct *notif_pcpu_wq;
struct work_struct notif_pcpu_work;
- struct work_struct irq_work;
+ struct work_struct sched_recv_irq_work;
struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
@@ -1291,12 +1292,23 @@ static void ffa_partitions_cleanup(void)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
#define FFA_FEAT_MANAGED_EXIT_INT (3)

-static irqreturn_t irq_handler(int irq, void *irq_data)
+static irqreturn_t sched_recv_irq_handler(int irq, void *irq_data)
{
struct ffa_pcpu_irq *pcpu = irq_data;
struct ffa_drv_info *info = pcpu->info;

- queue_work(info->notif_pcpu_wq, &info->irq_work);
+ queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
+{
+ struct ffa_pcpu_irq *pcpu = irq_data;
+ struct ffa_drv_info *info = pcpu->info;
+
+ queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
+ &info->notif_pcpu_work);

return IRQ_HANDLED;
}
@@ -1306,15 +1318,16 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
ffa_notification_info_get();
}

-static int ffa_sched_recv_irq_map(void)
+static int ffa_irq_map(u32 id)
{
- int ret, irq, sr_intid;
+ int ret, irq, intid;

- /* The returned sr_intid is assumed to be SGI donated to NS world */
- ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
+ /* The returned intid is assumed to be SGI donated to NS world */
+ ret = ffa_features(id, 0, &intid, NULL);
if (ret < 0) {
+ pr_err("Failed to retrieve FF-A id %u interrupt\n", id);
if (ret != -EOPNOTSUPP)
- pr_err("Failed to retrieve scheduler Rx interrupt\n");
+ pr_err("Failed to retrieve FF-A id %u interrupt\n", id);
return ret;
}

@@ -1329,12 +1342,12 @@ static int ffa_sched_recv_irq_map(void)

oirq.np = gic;
oirq.args_count = 1;
- oirq.args[0] = sr_intid;
+ oirq.args[0] = intid;
irq = irq_create_of_mapping(&oirq);
of_node_put(gic);
#ifdef CONFIG_ACPI
} else {
- irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
+ irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
ACPI_ACTIVE_HIGH);
#endif
}
@@ -1355,15 +1368,29 @@ static void ffa_sched_recv_irq_unmap(void)
}
}

+static void ffa_notif_pend_irq_unmap(void)
+{
+ if (drv_info->notif_pend_irq) {
+ irq_dispose_mapping(drv_info->notif_pend_irq);
+ drv_info->notif_pend_irq = 0;
+ }
+}
+
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
- enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->sched_recv_irq)
+ enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->notif_pend_irq)
+ enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
- disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->sched_recv_irq)
+ disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->notif_pend_irq)
+ disable_percpu_irq(drv_info->notif_pend_irq);
return 0;
}

@@ -1382,13 +1409,16 @@ static void ffa_uninit_pcpu_irq(void)
if (drv_info->sched_recv_irq)
free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

+ if (drv_info->notif_pend_irq)
+ free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
+
if (drv_info->irq_pcpu) {
free_percpu(drv_info->irq_pcpu);
drv_info->irq_pcpu = NULL;
}
}

-static int ffa_init_pcpu_irq(unsigned int irq)
+static int ffa_init_pcpu_irq(void)
{
struct ffa_pcpu_irq __percpu *irq_pcpu;
int ret, cpu;
@@ -1402,13 +1432,29 @@ static int ffa_init_pcpu_irq(unsigned int irq)

drv_info->irq_pcpu = irq_pcpu;

- ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
- if (ret) {
- pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
- return ret;
+ if (drv_info->sched_recv_irq) {
+ ret = request_percpu_irq(drv_info->sched_recv_irq,
+ sched_recv_irq_handler,
+ "ARM-FFA-SR", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering schedule receiver nIRQ %d: %d\n",
+ drv_info->sched_recv_irq, ret);
+ return ret;
+ }
+ }
+
+ if (drv_info->notif_pend_irq) {
+ ret = request_percpu_irq(drv_info->notif_pend_irq,
+ notif_pend_irq_handler,
+ "ARM-FFA-NP", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering notification pendig nIRQ %d: %d\n",
+ drv_info->notif_pend_irq, ret);
+ return ret;
+ }
}

- INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
+ INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
if (!drv_info->notif_pcpu_wq)
@@ -1429,6 +1475,7 @@ static void ffa_notifications_cleanup(void)
{
ffa_uninit_pcpu_irq();
ffa_sched_recv_irq_unmap();
+ ffa_notif_pend_irq_unmap();

if (drv_info->bitmap_created) {
ffa_notification_bitmap_destroy();
@@ -1439,30 +1486,30 @@ static void ffa_notifications_cleanup(void)

static void ffa_notifications_setup(void)
{
- int ret, irq;
+ int ret;

ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
- if (ret) {
- pr_info("Notifications not supported, continuing with it ..\n");
- return;
+ if (!ret) {
+ ret = ffa_notification_bitmap_create();
+ if (ret) {
+ pr_info("Notification bitmap create error %d\n", ret);
+ return;
+ }
+ drv_info->bitmap_created = true;
}

- ret = ffa_notification_bitmap_create();
- if (ret) {
- pr_info("Notification bitmap create error %d\n", ret);
- return;
- }
- drv_info->bitmap_created = true;
+ ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
+ if (ret > 0)
+ drv_info->sched_recv_irq = ret;

- irq = ffa_sched_recv_irq_map();
- if (irq <= 0) {
- ret = irq;
- goto cleanup;
- }
+ ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
+ if (ret > 0)
+ drv_info->notif_pend_irq = ret;

- drv_info->sched_recv_irq = irq;
+ if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
+ goto cleanup;

- ret = ffa_init_pcpu_irq(irq);
+ ret = ffa_init_pcpu_irq();
if (ret)
goto cleanup;

--
2.34.1



2024-03-26 15:39:57

by Sudeep Holla

Subject: Re: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

On Mon, Mar 25, 2024 at 09:13:35AM +0100, Jens Wiklander wrote:
> Add support for running the driver as a guest under a hypervisor. The
> main differences are the introduction of a notification pending
> interrupt and that FFA_NOTIFICATION_BITMAP_CREATE doesn't need to be
> called.
>
> The guest may need to use a notification pending interrupt instead of or
> in addition to the schedule receiver interrupt.

The above statement makes me worry a bit that we are still not on the same
page about NPI vs SRI. NPI need not exist in addition to SRI. And in v1
you did mention you have SRI in the guest as well. Then why do we need
NPI in addition to that? As part of SRI, the callback ffa_self_notif_handle
gets registered and will be called as part of SRI handling. What you
do in notif_pend_irq_handler() is exactly what ffa_self_notif_handle()
already does.
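
To spell out the overlap I see, here is a minimal stand-alone sketch;
the helpers are illustrative stand-ins, not the actual driver symbols:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for FFA_NOTIFICATION_GET plus callback dispatch. */
static void fetch_and_run_callbacks(uint16_t vcpu)
{
	printf("FFA_NOTIFICATION_GET on vCPU %u, run callbacks\n", vcpu);
}

/*
 * SRI path: FFA_NOTIFICATION_INFO_GET reports who has pending
 * notifications, then the registered callback handles our own partition.
 */
static void sri_path(void)
{
	uint16_t own_vcpu = 1;	/* pretend INFO_GET reported our vCPU 1 */

	/* This is the role ffa_self_notif_handle() plays. */
	fetch_and_run_callbacks(own_vcpu);
}

/* NPI path (this patch): the interrupt already arrives on the right vCPU. */
static void npi_path(uint16_t interrupted_vcpu)
{
	fetch_and_run_callbacks(interrupted_vcpu);
}

int main(void)
{
	sri_path();
	npi_path(1);	/* same end result either way */
	return 0;
}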

I am still struggling to understand the use case here. If you just have
NPI and no SRI when running the driver in the VM, then it aligns with
my understanding of the possible use case (not the one you mentioned in
v1, where the FF-A driver in the VM will have SRI as OP-TEE is the
secondary scheduler).

If we are supporting NPI or SRI, I think we can see if we can further
simplify this change, but I want to reach an agreement on the usage
model before we dig into the implementation details in this patch.

--
Regards,
Sudeep

2024-03-27 09:23:57

by Jens Wiklander

Subject: Re: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

On Tue, Mar 26, 2024 at 4:35 PM Sudeep Holla <[email protected]> wrote:
>
> On Mon, Mar 25, 2024 at 09:13:35AM +0100, Jens Wiklander wrote:
> > Add support for running the driver as a guest under a hypervisor. The
> > main differences are the introduction of a notification pending
> > interrupt and that FFA_NOTIFICATION_BITMAP_CREATE doesn't need to be
> > called.
> >
> > The guest may need to use a notification pending interrupt instead of or
> > in addition to the schedule receiver interrupt.
>
> The above statement makes me worry a bit that we are still not on the same
> page about NPI vs SRI. NPI need not exist in addition to SRI. And in v1
> you did mention you have SRI in the guest as well. Then why do we need
> NPI in addition to that? As part of SRI, the callback ffa_self_notif_handle
> gets registered and will be called as part of SRI handling. What you
> do in notif_pend_irq_handler() is exactly what ffa_self_notif_handle()
> already does.

That's my understanding of what an NPI handler should do to be able to
receive per-vCPU notifications.

>
> I am still struggling to understand the use case here. If you just have
> NPI and no SRI when running the driver in the VM, then it aligns with
> my understanding of the possible use case (not the one you mentioned in
> v1, where the FF-A driver in the VM will have SRI as OP-TEE is the
> secondary scheduler).

OP-TEE is not a secondary scheduler. OP-TEE (the SP) is scheduled as
usual by the normal world using direct requests. OP-TEE doesn't receive
FF-A notifications, and I'm not sure it will ever need to.

>
> If we are supporting NPI or SRI, I think we can see if we can further
> simplify this change, but I want to reach an agreement on the usage
> model before we dig into the implementation details in this patch.

The spec doesn't, as far as I know, explicitly make NPI and SRI
mutually exclusive, but it doesn't make sense to use both in all
configurations. I'm trying to be as dynamic as possible when
configuring the NPI and SRI handlers.

If the kernel is a physical endpoint, it's easy: it only uses SRI, and
the SPMC will not give an NPI when asked.

If the kernel is a virtual endpoint, it might be more complicated since
a VM may need to act as a secondary scheduler. That's not fully
supported in this patch, since it can only schedule itself. SRI is not
used in my current configuration. If a hypervisor injects an SRI, I
expect it to filter what's returned by FFA_NOTIFICATION_INFO_GET for
this VM so it doesn't interfere with notifications for other VMs.

In my current configuration, the hypervisor uses NPI to signal pending
notifications to the guest. I do not need a secondary scheduler since
OP-TEE doesn't receive notifications. At a later time, we may have SPs
that need to be scheduled, but that's not a problem I'm trying to
solve here.
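
To summarize the configuration logic the patch ends up with, here is a
simplified stand-alone model of the ffa_notifications_setup() flow; the
hardcoded feature results are examples only, a real driver discovers
them via FFA_FEATURES:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Example results of the two FFA_FEATURES queries. */
	bool have_sri = false;	/* physical endpoint: typically true */
	bool have_npi = true;	/* my guest-under-Xen setup: true */

	if (have_sri)
		printf("request per-cpu SRI handler (schedule receiver)\n");
	if (have_npi)
		printf("request per-cpu NPI handler (notification pending)\n");
	if (!have_sri && !have_npi)
		printf("no usable notification interrupt, clean up\n");
	return 0;
}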

Thanks,
Jens

2024-04-03 13:04:44

by Sudeep Holla

Subject: Re: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

On Wed, Mar 27, 2024 at 10:23:35AM +0100, Jens Wiklander wrote:
> On Tue, Mar 26, 2024 at 4:35 PM Sudeep Holla <[email protected]> wrote:
> >
> > On Mon, Mar 25, 2024 at 09:13:35AM +0100, Jens Wiklander wrote:
> > > Add support for running the driver as a guest under a hypervisor. The
> > > main differences are the introduction of a notification pending
> > > interrupt and that FFA_NOTIFICATION_BITMAP_CREATE doesn't need to be
> > > called.
> > >
> > > The guest may need to use a notification pending interrupt instead of or
> > > in addition to the schedule receiver interrupt.
> >
> > The above statement makes me worry a bit that we are still not on the same
> > page about NPI vs SRI. NPI need not exist in addition to SRI. And in v1
> > you did mention you have SRI in the guest as well. Then why do we need
> > NPI in addition to that? As part of SRI, the callback ffa_self_notif_handle
> > gets registered and will be called as part of SRI handling. What you
> > do in notif_pend_irq_handler() is exactly what ffa_self_notif_handle()
> > already does.
>
> That's my understanding of what an NPI handler should do to be able to
> receive per-vCPU notifications.
>
> >
> > I am still struggling to understand the use case here. If you just have
> > NPI and no SRI when running the driver in the VM, then it aligns with
> > my understanding of the possible use case (not the one you mentioned in
> > v1, where the FF-A driver in the VM will have SRI as OP-TEE is the
> > secondary scheduler).
>
> OP-TEE is not a secondary scheduler. OP-TEE (the SP) is scheduled as
> usual by the normal world using direct requests. OP-TEE doesn't receive
> FF-A notifications, and I'm not sure it will ever need to.
>

Sorry for my poor choice of words yet again. I meant the VM kernel
(running as an NS virtual endpoint) with the OP-TEE driver running in
it as the secondary scheduler. IIUC, there will be another instance of
the OP-TEE driver in the primary scheduler endpoint (i.e. the host
kernel) and it will take care of running the SRI handler?

> >
> > If we are supporting NPI or SRI, I think we can see if we can further
> > simplify this change, but I want to reach an agreement on the usage
> > model before we dig into the implementation details in this patch.
>
> The spec doesn't, as far as I know, explicitly make NPI and SRI
> mutually exclusive, but it doesn't make sense to use both in all
> configurations. I'm trying to be as dynamic as possible when
> configuring the NPI and SRI handlers.
>

Fair enough

> If the kernel is a physical endpoint, it's easy: it only uses SRI, and
> the SPMC will not give an NPI when asked.
>

Agreed.

> If the kernel is a virtual endpoint, it might be more complicated since
> a VM may need to act as a secondary scheduler. That's not fully
> supported in this patch, since it can only schedule itself. SRI is not
> used in my current configuration. If a hypervisor injects an SRI, I
> expect it to filter what's returned by FFA_NOTIFICATION_INFO_GET for
> this VM so it doesn't interfere with notifications for other VMs.
>

OK

> In my current configuration, the hypervisor uses NPI to signal pending
> notifications to the guest. I do not need a secondary scheduler since
> OP-TEE doesn't receive notifications. At a later time, we may have SPs
> that need to be scheduled, but that's not a problem I'm trying to
> solve here.

Understood. I will take a look at the patch with the above information.

--
Regards,
Sudeep

2024-04-03 14:29:04

by Jens Wiklander

Subject: Re: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

On Wed, Apr 3, 2024 at 3:03 PM Sudeep Holla <[email protected]> wrote:
>
> On Wed, Mar 27, 2024 at 10:23:35AM +0100, Jens Wiklander wrote:
> > On Tue, Mar 26, 2024 at 4:35 PM Sudeep Holla <[email protected]> wrote:
> > >
> > > On Mon, Mar 25, 2024 at 09:13:35AM +0100, Jens Wiklander wrote:
> > > > Add support for running the driver as a guest under a hypervisor. The
> > > > main differences are the introduction of a notification pending
> > > > interrupt and that FFA_NOTIFICATION_BITMAP_CREATE doesn't need to be
> > > > called.
> > > >
> > > > The guest may need to use a notification pending interrupt instead of or
> > > > in addition to the schedule receiver interrupt.
> > >
> > > The above statement makes me worry a bit that we are still not on the same
> > > page about NPI vs SRI. NPI need not exist in addition to SRI. And in v1
> > > you did mention you have SRI in the guest as well. Then why do we need
> > > NPI in addition to that? As part of SRI, the callback ffa_self_notif_handle
> > > gets registered and will be called as part of SRI handling. What you
> > > do in notif_pend_irq_handler() is exactly what ffa_self_notif_handle()
> > > already does.
> >
> > That's my understanding of what an NPI handler should do to be able to
> > receive per-vCPU notifications.
> >
> > >
> > > I am still struggling to understand the use case here. If you just have
> > > NPI and no SRI when running the driver in the VM, then it aligns with
> > > my understanding of the possible use case (not the one you mentioned in
> > > v1, where the FF-A driver in the VM will have SRI as OP-TEE is the
> > > secondary scheduler).
> >
> > OP-TEE is not a secondary scheduler. OP-TEE (the SP) is scheduled as
> > usual by the normal world using direct requests. OP-TEE doesn't receive
> > FF-A notifications, and I'm not sure it will ever need to.
> >
>
> Sorry for my poor choice of words yet again. I meant the VM kernel
> (running as an NS virtual endpoint) with the OP-TEE driver running in
> it as the secondary scheduler. IIUC, there will be another instance of
> the OP-TEE driver in the primary scheduler endpoint (i.e. the host
> kernel) and it will take care of running the SRI handler?

Yes, that's what we have in the Xen configuration, except that we
don't use an OP-TEE driver; it's only generic FF-A processing.
The SRI in this case is a physical interrupt, raised by the SPMC.

>
> > >
> > > If we are supporting NPI or SRI, I think we can see if we can further
> > > simplify this change, but I want to reach an agreement on the usage
> > > model before we dig into the implementation details in this patch.
> >
> > The spec doesn't, as far as I know, explicitly make NPI and SRI
> > mutually exclusive, but it doesn't make sense to use both in all
> > configurations. I'm trying to be as dynamic as possible when
> > configuring the NPI and SRI handlers.
> >
>
> Fair enough
>
> > If the kernel is a physical endpoint, it's easy: it only uses SRI, and
> > the SPMC will not give an NPI when asked.
> >
>
> Agreed.
>
> > If the kernel is a virtual endpoint, it might be more complicated since
> > a VM may need to act as a secondary scheduler. That's not fully
> > supported in this patch, since it can only schedule itself. SRI is not
> > used in my current configuration. If a hypervisor injects an SRI, I
> > expect it to filter what's returned by FFA_NOTIFICATION_INFO_GET for
> > this VM so it doesn't interfere with notifications for other VMs.
> >
>
> OK
>
> > In my current configuration, the hypervisor uses NPI to signal pending
> > notifications to the guest. I do not need a secondary scheduler since
> > OP-TEE doesn't receive notifications. At a later time, we may have SPs
> > that need to be scheduled, but that's not a problem I'm trying to
> > solve here.
>
> Understood. I will take a look at the patch with the above information.

Thanks,
Jens

>
> --
> Regards,
> Sudeep

2024-04-10 11:45:51

by Sudeep Holla

Subject: Re: [PATCH v2] firmware: arm_ffa: support running as a guest in a vm

On Wed, Apr 03, 2024 at 04:27:43PM +0200, Jens Wiklander wrote:
> On Wed, Apr 3, 2024 at 3:03 PM Sudeep Holla <[email protected]> wrote:
> > Understood. I will take a look at the patch with the above information.
>

I reworked this a bit with the idea that SRI and NPI will not be
required at the same time and posted the series [1]. It would be good
if you could provide feedback on it and check that it works fine in
your setup.

--
Regards,
Sudeep

[1] https://lore.kernel.org/all/[email protected]