2023-01-20 23:29:58

by Elliot Berman

Subject: [PATCH v9 22/27] virt: gunyah: Add resource tickets

Some VM functions need to acquire Gunyah resources. For instance, Gunyah
vCPUs are exposed to the host as a resource. The Gunyah vCPU function
registers a resource ticket and can interact with the hypervisor once
the resource ticket is filled.
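
To illustrate the intended usage, a function driver might set up and
register a ticket roughly like this (a sketch only -- the my_fn names
are placeholders, GUNYAH_RESOURCE_TYPE_VCPU is assumed from the
resource type enum in gunyah.h, and the real vCPU function arrives
later in this series):

	static int my_fn_populate(struct gunyah_vm_resource_ticket *ticket,
				  struct gunyah_resource *ghrsc)
	{
		/* Claim the resource; a nonzero return declines it. */
		return 0;
	}

	static void my_fn_unpopulate(struct gunyah_vm_resource_ticket *ticket,
				     struct gunyah_resource *ghrsc)
	{
		/* Stop using the resource; it is being reclaimed. */
	}

	static struct gunyah_vm_resource_ticket my_ticket = {
		.resource_type	= GUNYAH_RESOURCE_TYPE_VCPU,
		.label		= 0,	/* matches the label in the VM's config */
		.owner		= THIS_MODULE,
		.populate	= my_fn_populate,
		.unpopulate	= my_fn_unpopulate,
	};

ghvm_add_resource_ticket(ghvm, &my_ticket) then fills the ticket as soon
as a matching resource exists, or later when one is added.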

Signed-off-by: Elliot Berman <[email protected]>
---
 drivers/virt/gunyah/vm_mgr.c  | 105 +++++++++++++++++++++++++++++++++-
 drivers/virt/gunyah/vm_mgr.h  |   5 ++
 include/linux/gunyah.h        |   4 ++
 include/linux/gunyah_vm_mgr.h |  14 +++++
 4 files changed, 127 insertions(+), 1 deletion(-)

diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index 1e795f3d19d5..ee593cf1b074 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -135,6 +135,56 @@ static long gh_vm_rm_function(struct gunyah_vm *ghvm, struct gh_vm_function *fn)
 	return 0;
 }
 
+int ghvm_add_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket)
+{
+	struct gunyah_vm_resource_ticket *iter;
+	struct gunyah_resource *ghrsc;
+	int ret = 0;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry(iter, &ghvm->resource_tickets, list) {
+		if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
+
+	if (!try_module_get(ticket->owner)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	list_add(&ticket->list, &ghvm->resource_tickets);
+	INIT_LIST_HEAD(&ticket->resources);
+
+	list_for_each_entry(ghrsc, &ghvm->resources, list) {
+		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+			if (!ticket->populate(ticket, ghrsc))
+				list_move(&ghrsc->list, &ticket->resources);
+		}
+	}
+out:
+	mutex_unlock(&ghvm->resources_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ghvm_add_resource_ticket);
+
+void ghvm_remove_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket)
+{
+	struct gunyah_resource *ghrsc, *iter;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
+		ticket->unpopulate(ticket, ghrsc);
+		list_move(&ghrsc->list, &ghvm->resources);
+	}
+
+	module_put(ticket->owner);
+	list_del(&ticket->list);
+	mutex_unlock(&ghvm->resources_lock);
+}
+EXPORT_SYMBOL_GPL(ghvm_remove_resource_ticket);
+
 static void ghvm_put(struct kref *kref)
 {
 	struct gunyah_vm *ghvm = container_of(kref, struct gunyah_vm, kref);
@@ -180,17 +230,40 @@ static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gh_rm *rm)
 	INIT_LIST_HEAD(&ghvm->memory_mappings);
 	init_rwsem(&ghvm->status_lock);
 	kref_init(&ghvm->kref);
+	mutex_init(&ghvm->resources_lock);
+	INIT_LIST_HEAD(&ghvm->resources);
+	INIT_LIST_HEAD(&ghvm->resource_tickets);
 	INIT_LIST_HEAD(&ghvm->functions);
 
 	return ghvm;
 }
 
+static void gh_vm_add_resource(struct gunyah_vm *ghvm, struct gunyah_resource *ghrsc)
+{
+	struct gunyah_vm_resource_ticket *ticket;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry(ticket, &ghvm->resource_tickets, list) {
+		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+			if (!ticket->populate(ticket, ghrsc)) {
+				list_add(&ghrsc->list, &ticket->resources);
+				goto found;
+			}
+		}
+	}
+	list_add(&ghrsc->list, &ghvm->resources);
+found:
+	mutex_unlock(&ghvm->resources_lock);
+}
+
 static int gh_vm_start(struct gunyah_vm *ghvm)
 {
 	struct gunyah_vm_memory_mapping *mapping;
 	u64 dtb_offset;
 	u32 mem_handle;
-	int ret;
+	struct gunyah_resource *ghrsc;
+	struct gh_rm_hyp_resources *resources;
+	int ret, i;
 
 	down_write(&ghvm->status_lock);
 	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
@@ -241,6 +314,22 @@ static int gh_vm_start(struct gunyah_vm *ghvm)
 		goto err;
 	}
 
+	ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
+	if (ret) {
+		pr_warn("Failed to get hypervisor resources for VM: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < le32_to_cpu(resources->n_entries); i++) {
+		ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
+		if (!ghrsc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		gh_vm_add_resource(ghvm, ghrsc);
+	}
+
 	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
 	if (ret) {
 		pr_warn("Failed to start VM: %d\n", ret);
@@ -380,6 +469,8 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
 	struct gunyah_vm *ghvm = filp->private_data;
 	struct gunyah_vm_memory_mapping *mapping, *tmp;
 	struct gunyah_vm_function *f, *fiter;
+	struct gunyah_vm_resource_ticket *ticket, *titer;
+	struct gunyah_resource *ghrsc, *riter;
 
 	gh_vm_stop(ghvm);
 
@@ -394,6 +485,18 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
 		kfree(mapping);
 	}
 
+	if (!list_empty(&ghvm->resource_tickets)) {
+		pr_warn("Dangling resource tickets:\n");
+		list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, list) {
+			pr_warn(" %pS\n", ticket->populate);
+			ghvm_remove_resource_ticket(ghvm, ticket);
+		}
+	}
+
+	list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
+		gh_rm_free_resource(ghrsc);
+	}
+
 	put_gh_rm(ghvm->rm);
 	put_gunyah_vm(ghvm);
 	return 0;
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index 8d3b0678fb96..f52350145010 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -7,6 +7,7 @@
 #define _GH_PRIV_VM_MGR_H
 
 #include <linux/gunyah_rsc_mgr.h>
+#include <linux/gunyah_vm_mgr.h>
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/miscdevice.h>
@@ -47,6 +48,10 @@ struct gunyah_vm {
 	struct mutex mm_lock;
 	struct list_head memory_mappings;
 
+	struct mutex resources_lock;
+	struct list_head resources;
+	struct list_head resource_tickets;
+
 	struct list_head functions;
 };

diff --git a/include/linux/gunyah.h b/include/linux/gunyah.h
index 91d2c9f5b2f0..d745916cabcc 100644
--- a/include/linux/gunyah.h
+++ b/include/linux/gunyah.h
@@ -27,6 +27,10 @@ struct gunyah_resource {
 	enum gunyah_resource_type type;
 	u64 capid;
 	int irq;
+
+	/* To help allocator of resource manager */
+	struct list_head list;
+	u32 rm_label;
 };
 
 /**
diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h
index 69f98eb503e9..995a9b0eb6b7 100644
--- a/include/linux/gunyah_vm_mgr.h
+++ b/include/linux/gunyah_vm_mgr.h
@@ -65,4 +65,18 @@ void gunyah_vm_function_unregister(struct gunyah_vm_function_driver *f);
 	} \
 	module_exit(_name##_mod_exit)
 
+struct gunyah_vm_resource_ticket {
+	struct list_head list;
+	struct list_head resources;
+	enum gunyah_resource_type resource_type;
+	u32 label;
+
+	struct module *owner;
+	int (*populate)(struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc);
+	void (*unpopulate)(struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc);
+};
+
+int ghvm_add_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket);
+void ghvm_remove_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket);
+
 #endif
--
2.39.0


2023-02-06 09:50:55

by Srivatsa Vaddagiri

Subject: Re: [PATCH v9 22/27] virt: gunyah: Add resource tickets

* Elliot Berman <[email protected]> [2023-01-20 14:46:21]:

> +int ghvm_add_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket)
> +{
> +	struct gunyah_vm_resource_ticket *iter;
> +	struct gunyah_resource *ghrsc;
> +	int ret = 0;
> +
> +	mutex_lock(&ghvm->resources_lock);
> +	list_for_each_entry(iter, &ghvm->resource_tickets, list) {
> +		if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
> +			ret = -EEXIST;
> +			goto out;
> +		}
> +	}
> +
> +	if (!try_module_get(ticket->owner)) {
> +		ret = -ENODEV;
> +		goto out;
> +	}
> +
> +	list_add(&ticket->list, &ghvm->resource_tickets);
> +	INIT_LIST_HEAD(&ticket->resources);
> +
> +	list_for_each_entry(ghrsc, &ghvm->resources, list) {
> +		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
> +			if (!ticket->populate(ticket, ghrsc))
> +				list_move(&ghrsc->list, &ticket->resources);

Does the search need to continue after a hit? 'gh_vm_add_resource' seems
to break out of the loop on the first match.

Also, do we have examples of more than one 'gunyah_resource' being
associated with the same 'gunyah_vm_resource_ticket'? Both the vcpu and
irqfd tickets seem to deal with just one resource.

> static int gh_vm_start(struct gunyah_vm *ghvm)
> {
> 	struct gunyah_vm_memory_mapping *mapping;
> 	u64 dtb_offset;
> 	u32 mem_handle;
> -	int ret;
> +	struct gunyah_resource *ghrsc;
> +	struct gh_rm_hyp_resources *resources;
> +	int ret, i;
>
> 	down_write(&ghvm->status_lock);
> 	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
> @@ -241,6 +314,22 @@ static int gh_vm_start(struct gunyah_vm *ghvm)
> 		goto err;
> 	}
>
> +	ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
> +	if (ret) {
> +		pr_warn("Failed to get hypervisor resources for VM: %d\n", ret);
> +		goto err;
> +	}
> +
> +	for (i = 0; i < le32_to_cpu(resources->n_entries); i++) {

Minor nit: I'm not sure we can rely on the compiler to optimize this, so
it would be better to call le32_to_cpu once and use the result in the loop.
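
i.e. something along these lines (untested sketch):

	u32 n_entries = le32_to_cpu(resources->n_entries);

	for (i = 0; i < n_entries; i++) {
		...
	}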

> +		ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
> +		if (!ghrsc) {
> +			ret = -ENOMEM;
> +			goto err;
> +		}
> +
> +		gh_vm_add_resource(ghvm, ghrsc);

Shouldn't gh_vm_add_resource() propagate the return value of
ticket->populate(), so that we can bail out of this loop on failure?
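
For example (untested sketch, assuming gh_vm_add_resource() is changed
to return the populate result):

	ret = gh_vm_add_resource(ghvm, ghrsc);
	if (ret)
		goto err;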

> +	}
> +
> 	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
> 	if (ret) {
> 		pr_warn("Failed to start VM: %d\n", ret);

2023-02-06 21:30:57

by Elliot Berman

Subject: Re: [PATCH v9 22/27] virt: gunyah: Add resource tickets



On 2/6/2023 1:50 AM, Srivatsa Vaddagiri wrote:
> * Elliot Berman <[email protected]> [2023-01-20 14:46:21]:
>
>> +int ghvm_add_resource_ticket(struct gunyah_vm *ghvm, struct gunyah_vm_resource_ticket *ticket)
>> +{
>> +	struct gunyah_vm_resource_ticket *iter;
>> +	struct gunyah_resource *ghrsc;
>> +	int ret = 0;
>> +
>> +	mutex_lock(&ghvm->resources_lock);
>> +	list_for_each_entry(iter, &ghvm->resource_tickets, list) {
>> +		if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
>> +			ret = -EEXIST;
>> +			goto out;
>> +		}
>> +	}
>> +
>> +	if (!try_module_get(ticket->owner)) {
>> +		ret = -ENODEV;
>> +		goto out;
>> +	}
>> +
>> +	list_add(&ticket->list, &ghvm->resource_tickets);
>> +	INIT_LIST_HEAD(&ticket->resources);
>> +
>> +	list_for_each_entry(ghrsc, &ghvm->resources, list) {
>> +		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
>> +			if (!ticket->populate(ticket, ghrsc))
>> +				list_move(&ghrsc->list, &ticket->resources);
>
> Do we need the search to continue in case of a hit? 'gh_vm_add_resource' seems to
> break loop on first occurrence.
>
> Also do we have examples of more than one 'gunyah_resource' being associated
> with same 'gunyah_vm_resource_ticket'? Both vcpu and irqfd tickets seem to deal
> with just one resource?
>

I'll mention this in the commit text as well.

Resources are created by Gunyah as configured in the VM's devicetree
configuration. Gunyah doesn't process the label, which makes it possible
for userspace to create multiple resources with the same label. The
kernel needs to be prepared for that to happen. IMO, this isn't a
framework issue, so I've chosen the policy to be "many-to-one": a
resource ticket can bind to many resources, and a resource is bound to
only one ticket. If a resource ticket handler isn't designed to accept
multiple resources, it can skip/ignore any further populate callbacks.
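
For example, a handler that only expects one resource could have a
populate callback along these lines (an illustrative sketch -- the
gh_vcpu names stand in for whatever state the handler keeps, not the
actual vCPU code):

	static int gh_vcpu_populate(struct gunyah_vm_resource_ticket *ticket,
				    struct gunyah_resource *ghrsc)
	{
		struct gunyah_vcpu *vcpu = container_of(ticket, struct gunyah_vcpu, ticket);

		/* Already bound one resource: decline duplicates of this label. */
		if (vcpu->rsc)
			return -EEXIST;

		vcpu->rsc = ghrsc;
		return 0;
	}

A nonzero return here just leaves the resource on the VM's list; the
framework doesn't treat it as a fatal error.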

>> static int gh_vm_start(struct gunyah_vm *ghvm)
>> {
>> 	struct gunyah_vm_memory_mapping *mapping;
>> 	u64 dtb_offset;
>> 	u32 mem_handle;
>> -	int ret;
>> +	struct gunyah_resource *ghrsc;
>> +	struct gh_rm_hyp_resources *resources;
>> +	int ret, i;
>>
>> 	down_write(&ghvm->status_lock);
>> 	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
>> @@ -241,6 +314,22 @@ static int gh_vm_start(struct gunyah_vm *ghvm)
>> 		goto err;
>> 	}
>>
>> +	ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
>> +	if (ret) {
>> +		pr_warn("Failed to get hypervisor resources for VM: %d\n", ret);
>> +		goto err;
>> +	}
>> +
>> +	for (i = 0; i < le32_to_cpu(resources->n_entries); i++) {
>
> minor nit: not sure if we can rely on compiler to optimize this, but it would
> be better if we run le32_to_cpu once and use the result in loop.
>

Done.

>> +		ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
>> +		if (!ghrsc) {
>> +			ret = -ENOMEM;
>> +			goto err;
>> +		}
>> +
>> +		gh_vm_add_resource(ghvm, ghrsc);
>
> Shouldn't we have gh_vm_add_resource()-> ticket->populate() return a result and
> in case of failure we should bail out from this loop?
>

I'm hesitant to treat the resource ticket rejecting the resource as a
bail condition.

Userspace is able to detect when functions didn't get set up, and I
wanted to avoid adding further complexity to kernel drivers.

>> +	}
>> +
>> 	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
>> 	if (ret) {
>> 		pr_warn("Failed to start VM: %d\n", ret);