From: Erwan Yvin <[email protected]>
This driver depends on Rusty's new host virtio ring implementation,
so this patch-set is based on the vringh branch in Rusty's git,
with the vringh wrapper patch on top. The patches do not apply cleanly on top of the
remoteproc virtio config patches from Sjur, but they merge fine.
CAIF will use this new host virtio ring implementation.
Ido, Ohad, could you please review these patches?
Best regards
Erwan YVIN (1):
remoteproc: refactor find_vqs to prepare for vringh
Erwan Yvin (1):
remoteproc: Add support for host virtio rings (vringh)
drivers/remoteproc/remoteproc_virtio.c | 237 +++++++++++++++++++++++++-------
include/linux/remoteproc.h | 22 +++
2 files changed, 208 insertions(+), 51 deletions(-)
--
1.7.9.2
From: Erwan YVIN <[email protected]>
Refactor code for creating virtio queues and prepare
for accommodating the new host virtio rings.
This refactoring moves all handling of struct virtqueue
to a separate function __create_new_virtqueue().
A more generic function __rproc_virtio_find_rings()
allocates the virtio rings and ring IDs and calls the
virtqueue specific function when a virtio ring and the
ring IDs are allocated. Some type safety is sacrificed,
in order to make the code more generic.
Signed-off-by: Erwan Yvin <[email protected]>
---
drivers/remoteproc/remoteproc_virtio.c | 138 +++++++++++++++++++-------------
1 file changed, 82 insertions(+), 56 deletions(-)
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 9e198e5..ef5ec8a 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -67,50 +67,93 @@ irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
}
EXPORT_SYMBOL(rproc_vq_interrupt);
-static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
- unsigned id,
- void (*callback)(struct virtqueue *vq),
- const char *name)
+/*
+ * Allocate and intitialize the virtio rings. We downcast some types here
+ * in order to have common code between virtqueue and vringh.
+ */
+static int __rproc_virtio_find_rings(struct virtio_device *vdev,
+ unsigned nrings, void *rings[],
+ void *callbacks[], const char *name[],
+ void *create(struct rproc_vring *rvring,
+ unsigned int index,
+ void *callbacks,
+ const char *name))
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
struct device *dev = &rproc->dev;
struct rproc_vring *rvring;
- struct virtqueue *vq;
- void *addr;
- int len, size, ret;
+ /* Use resource entry fw_rsc_vdev.num_of_rings instead when available */
+ size_t max = ARRAY_SIZE(rvdev->vring);
+ const char *rng_name;
+ int rng, id, err;
/* we're temporarily limited to two virtqueues per rvdev */
- if (id >= ARRAY_SIZE(rvdev->vring))
- return ERR_PTR(-EINVAL);
+ if (nrings > max)
+ return -EINVAL;
- if (!name)
- return NULL;
+ /*
+ * We have two different arrays here:
+ * rvdev->vring[] holds the virtio rings in shared memory as
+ * specified by the resource table. It uses "rng" as index.
+ *
+ * The parameter rings[] is used for passing the requested
+ * virtio rings back to the caller. nrings is used by the
+ * caller to specify the number of rings to return.
+ */
+ for (id = rng = 0; id < nrings && rng < max; ++rng) {
+ rvring = &rvdev->vring[rng];
+ if (rvring->vq)
+ continue;
+
+ /* Allocate and initialize the virtio ring */
+ err = rproc_alloc_vring(rvdev, rng);
+ if (err)
+ return err;
+
+ memset(rvring->va, 0, vring_size(rvring->len, rvring->align));
+ rng_name = name ? name[id] : NULL;
+ rings[id] = create(rvring, id, callbacks[id], rng_name);
+ if (IS_ERR(rings[id])) {
+ rproc_free_vring(rvring);
+ return PTR_ERR(rings[id]);
+ }
- ret = rproc_alloc_vring(rvdev, id);
- if (ret)
- return ERR_PTR(ret);
+ dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
+ rng, rvring->va, rvring->len, rvring->notifyid);
+ ++id;
+ }
- rvring = &rvdev->vring[id];
- addr = rvring->va;
- len = rvring->len;
+ if (id < nrings) {
+ dev_err(dev, "couldn't find the requested rings: %d\n", nrings);
+ return -ENOBUFS;
+ }
- /* zero vring */
- size = vring_size(len, rvring->align);
- memset(addr, 0, size);
+ /* now that the rings are all set, boot the remote processor */
+ return rproc_boot(rproc);
+}
- dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+/* Helper function that creates and initializes the virtqueue */
+static void *__create_new_virtqueue(struct rproc_vring *rvring,
+ unsigned int id,
+ void *callback,
+ const char *name)
+{
+ struct virtio_device *vdev = &rvring->rvdev->vdev;
+ struct virtqueue *vq;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr,
- rproc_virtio_notify, callback, name);
+ vq = vring_new_virtqueue(id, rvring->len, rvring->align, vdev, false,
+ rvring->va, rproc_virtio_notify, callback,
+ name);
if (!vq) {
- dev_err(dev, "vring_new_virtqueue %s failed\n", name);
- rproc_free_vring(rvring);
+ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -133,44 +176,27 @@ static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
}
}
-static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
{
- struct rproc *rproc = vdev_to_rproc(vdev);
-
- /* power down the remote processor before deleting vqs */
- rproc_shutdown(rproc);
-
- __rproc_virtio_del_vqs(vdev);
+ void *cbs = callbacks, *rings = vqs;
+ int err = __rproc_virtio_find_rings(vdev, nvqs, rings, cbs,
+ names, __create_new_virtqueue);
+ if (err)
+ __rproc_virtio_del_vqs(vdev);
+ return err;
}
-static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char *names[])
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
struct rproc *rproc = vdev_to_rproc(vdev);
- int i, ret;
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
- if (IS_ERR(vqs[i])) {
- ret = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- /* now that the vqs are all set, boot the remote processor */
- ret = rproc_boot(rproc);
- if (ret) {
- dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
- goto error;
- }
-
- return 0;
+ /* power down the remote processor before deleting vqs */
+ rproc_shutdown(rproc);
-error:
__rproc_virtio_del_vqs(vdev);
- return ret;
}
/*
--
1.7.9.2
From: Erwan Yvin <[email protected]>
Implement the vringh callback functions in order
to manage host virtio rings and handle kicks.
This allows the virtio device to request host virtio rings.
Signed-off-by: Erwan Yvin <[email protected]>
---
drivers/remoteproc/remoteproc_virtio.c | 115 +++++++++++++++++++++++++++++++-
include/linux/remoteproc.h | 22 ++++++
2 files changed, 134 insertions(+), 3 deletions(-)
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index ef5ec8a..4ad87c5 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -23,12 +23,15 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
+#include <linux/vringh.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "remoteproc_internal.h"
+static u32 rproc_virtio_get_features(struct virtio_device *vdev);
+
/* kick the remote processor, and let it know which virtqueue to poke at */
static void rproc_virtio_notify(struct virtqueue *vq)
{
@@ -41,6 +44,18 @@ static void rproc_virtio_notify(struct virtqueue *vq)
rproc->ops->kick(rproc, notifyid);
}
+/* kick the remote processor, and let it know which vring to poke at */
+static void rproc_virtio_vringh_notify(struct vringh *vrh)
+{
+ struct rproc_vring *rvring = vringh_to_rvring(vrh);
+ struct rproc *rproc = rvring->rvdev->rproc;
+ int notifyid = rvring->notifyid;
+
+ dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
+
+ rproc->ops->kick(rproc, notifyid);
+}
+
/**
* rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
* @rproc: handle to the remote processor
@@ -60,10 +75,18 @@ irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
rvring = idr_find(&rproc->notifyids, notifyid);
- if (!rvring || !rvring->vq)
+ if (!rvring)
return IRQ_NONE;
- return vring_interrupt(0, rvring->vq);
+ if (rvring->rvringh && rvring->rvringh->vringh_cb) {
+ rvring->rvringh->vringh_cb(&rvring->rvdev->vdev,
+ &rvring->rvringh->vrh);
+ return IRQ_HANDLED;
+ } else if (rvring->vq) {
+ return vring_interrupt(0, rvring->vq);
+ } else {
+ return IRQ_NONE;
+ }
}
EXPORT_SYMBOL(rproc_vq_interrupt);
@@ -103,7 +126,7 @@ static int __rproc_virtio_find_rings(struct virtio_device *vdev,
*/
for (id = rng = 0; id < nrings && rng < max; ++rng) {
rvring = &rvdev->vring[rng];
- if (rvring->vq)
+ if (rvring->vq || rvring->rvringh)
continue;
/* Allocate and initialize the virtio ring */
@@ -199,6 +222,86 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
__rproc_virtio_del_vqs(vdev);
}
+static void __rproc_virtio_del_vrhs(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ int i, num_of_vrings = ARRAY_SIZE(rvdev->vring);
+
+ for (i = 0; i < num_of_vrings; i++) {
+ struct rproc_vring *rvring = &rvdev->vring[i];
+ if (!rvring->rvringh)
+ continue;
+ kfree(rvring->rvringh);
+ rvring->rvringh = NULL;
+ rproc_free_vring(rvring);
+ }
+}
+
+/* Helper function that creates and initializes the host virtio ring */
+static void *__create_new_vringh(struct rproc_vring *rvring,
+ unsigned int index,
+ void *callback,
+ const char *name)
+{
+ struct rproc_vringh *rvrh = NULL;
+ struct rproc_vdev *rvdev = rvring->rvdev;
+ int err;
+
+ rvrh = kzalloc(sizeof(*rvrh), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!rvrh)
+ goto err;
+
+ /* initialize the host virtio ring */
+ rvrh->rvring = rvring;
+ rvrh->vringh_cb = callback;
+ rvrh->vrh.notify = rproc_virtio_vringh_notify;
+ memset(rvring->va, 0, vring_size(rvring->len, rvring->align));
+ vring_init(&rvrh->vrh.vring, rvring->len, rvring->va, rvring->align);
+
+ /*
+ * Create the new vring host, and tell we're not interested in
+ * the 'weak' smp barriers, since we're talking with a real device.
+ */
+ err = vringh_init_kern(&rvrh->vrh,
+ rproc_virtio_get_features(&rvdev->vdev),
+ rvring->len,
+ false,
+ rvrh->vrh.vring.desc,
+ rvrh->vrh.vring.avail,
+ rvrh->vrh.vring.used);
+ if (err)
+ goto err;
+
+ rvring->rvringh = rvrh;
+ return &rvrh->vrh;
+err:
+ kfree(rvrh);
+ return ERR_PTR(err);
+}
+
+static int rproc_virtio_find_vrhs(struct virtio_device *vdev, unsigned nhvrs,
+ struct vringh *vrhs[],
+ vrh_callback_t *callbacks[])
+{
+ void *cbs = callbacks, *rings = vrhs;
+ int err = __rproc_virtio_find_rings(vdev, nhvrs, rings, cbs,
+ NULL, __create_new_vringh);
+ if (err)
+ __rproc_virtio_del_vrhs(vdev);
+ return err;
+}
+
+static void rproc_virtio_del_vrhs(struct virtio_device *vdev)
+{
+ struct rproc *rproc = vdev_to_rproc(vdev);
+
+ /* power down the remote processor before deleting vqs */
+ rproc_shutdown(rproc);
+
+ __rproc_virtio_del_vrhs(vdev);
+}
+
/*
* We don't support yet real virtio status semantics.
*
@@ -258,6 +361,11 @@ static struct virtio_config_ops rproc_virtio_config_ops = {
.get_status = rproc_virtio_get_status,
};
+static struct vringh_config_ops rproc_virtio_vringh_ops = {
+ .find_vrhs = rproc_virtio_find_vrhs,
+ .del_vrhs = rproc_virtio_del_vrhs,
+};
+
/*
* This function is called whenever vdev is released, and is responsible
* to decrement the remote processor's refcount which was taken when vdev was
@@ -296,6 +404,7 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
vdev->id.device = id,
vdev->config = &rproc_virtio_config_ops,
+ vdev->vringh_config = &rproc_virtio_vringh_ops;
vdev->dev.parent = dev;
vdev->dev.release = rproc_vdev_release;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index faf3332..9796562 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -39,6 +39,7 @@
#include <linux/klist.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
+#include <linux/vringh.h>
#include <linux/completion.h>
#include <linux/idr.h>
@@ -435,6 +436,19 @@ struct rproc {
#define RVDEV_NUM_VRINGS 2
/**
+ * struct rproc_vringh - remoteproc host vring
+ * @vrh: Host side virtio ring
+ * @rvring: Virtio ring associated with the device
+ * @vringh_cb: Callback notifying virtio driver about new buffers
+ */
+struct rproc_vring;
+struct rproc_vringh {
+ struct vringh vrh;
+ struct rproc_vring *rvring;
+ vrh_callback_t *vringh_cb;
+};
+
+/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
* @dma: dma address
@@ -444,6 +458,7 @@ struct rproc {
* @notifyid: rproc-specific unique vring index
* @rvdev: remote vdev
* @vq: the virtqueue of this vring
+ * @rvringh: the reversed host-side vring
*/
struct rproc_vring {
void *va;
@@ -454,6 +469,7 @@ struct rproc_vring {
int notifyid;
struct rproc_vdev *rvdev;
struct virtqueue *vq;
+ struct rproc_vringh *rvringh;
};
/**
@@ -497,4 +513,10 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
return rvdev->rproc;
}
+static inline struct rproc_vring *vringh_to_rvring(struct vringh *vrh)
+{
+ struct rproc_vringh *rvrh = container_of(vrh, struct rproc_vringh, vrh);
+ return rvrh->rvring;
+}
+
#endif /* REMOTEPROC_H */
--
1.7.9.2
Hi Ohad,
On Fri, Mar 15, 2013 at 10:29 AM, Erwan Yvin <[email protected]> wrote:
> From: Erwan Yvin <[email protected]>
>
> Implement the vringh callback functions in order
> to manage host virtio rings and handle kicks.
> This allows virtio device to request host-virtio-rings.
>
> Signed-off-by: Erwan Yvin <[email protected]>
Any chance that you could review this in time for 3.10 merge window?
This is the last missing piece for host vring support.
The patches would need your Acked-by/Reviewed-by and should go via
Rusty's tree.
Thanks,
Sjur
Hi Sjur,
On Fri, Apr 5, 2013 at 9:15 AM, Sjur Brændeland <[email protected]> wrote:
> Any chance that you could review this in time for 3.10 merge window?
Sure, I'm getting to this early next week. Monday at the latest.
Thanks,
Ohad.