2022-12-27 02:33:40

by Shunsuke Mie

[permalink] [raw]
Subject: [RFC PATCH 0/6] Introduce a vringh accessor for IO memory

Vringh is a host-side implementation of virtio rings, and supports a vring
located in three kinds of memory: userspace, kernel space, and a space
translated by an iotlb.

The goal of this patchset is to refactor vringh and introduce a new vringh
accessor for the vring located on the io memory region. The io memory
accessor (iomem) is used by a driver that is not published yet, but I'm
planning to publish it. Drivers affected by these changes are not included
in this patchset. e.g. caif_virtio and vdpa (sim_net, sim_blk and net/mlx5)
drivers.

This patchset can be separated into 3 parts:
1. Fix and prepare some code related vringh [1, 2, 3/6]
2. Unify the vringh APIs and change related [4, 5/6]
3. Support IOMEM to vringh [6/6]

The first part is preparation for the second part, containing a small fix
and some changes. A test program for vringh named vringh_test is also
updated along with the changes. The second part unifies the vringh APIs for
all of the accessors: user, kern and iotlb. The main point is struct
vringh_ops, which fills the gap between the accessors. The final part
introduces iomem support to vringh according to the unified API from the
second part.

These changes are tested with vringh_test for the user accessor, and with a
not-yet-published driver for the kern and iomem accessors; I plan to add a
link to a patchset for that driver in the next version of this patchset.

Shunsuke Mie (6):
vringh: fix a typo in comments for vringh_kiov
vringh: remove vringh_iov and unite to vringh_kiov
tools/virtio: convert to new vringh user APIs
vringh: unify the APIs for all accessors
tools/virtio: convert to use new unified vringh APIs
vringh: IOMEM support

drivers/vhost/Kconfig | 6 +
drivers/vhost/vringh.c | 721 ++++++++++++-------------------------
include/linux/vringh.h | 147 +++-----
tools/virtio/vringh_test.c | 123 ++++---
4 files changed, 356 insertions(+), 641 deletions(-)

--
2.25.1


2022-12-27 02:33:51

by Shunsuke Mie

[permalink] [raw]
Subject: [RFC PATCH 1/9] vringh: fix a typo in comments for vringh_kiov

Probably it is a simple copy-paste error from struct vringh_iov.

Fixes: f87d0fbb5798 ("vringh: host-side implementation of virtio rings.")
Signed-off-by: Shunsuke Mie <[email protected]>
---
include/linux/vringh.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 212892cf9822..1991a02c6431 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -92,7 +92,7 @@ struct vringh_iov {
};

/**
- * struct vringh_iov - kvec mangler.
+ * struct vringh_kiov - kvec mangler.
*
* Mangles kvec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
--
2.25.1

2022-12-27 02:59:45

by Shunsuke Mie

[permalink] [raw]
Subject: [RFC PATCH 3/9] tools/virtio: convert to new vringh user APIs

struct vringh_iov is being removed, so convert vringh_test to use the new
vringh user APIs. This changes it to use struct vringh_kiov instead of
struct vringh_iov.

Signed-off-by: Shunsuke Mie <[email protected]>
---
tools/virtio/vringh_test.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 98ff808d6f0c..6c9533b8a2ca 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -193,8 +193,8 @@ static int parallel_test(u64 features,
errx(1, "Could not set affinity to cpu %u", first_cpu);

while (xfers < NUM_XFERS) {
- struct iovec host_riov[2], host_wiov[2];
- struct vringh_iov riov, wiov;
+ struct kvec host_riov[2], host_wiov[2];
+ struct vringh_kiov riov, wiov;
u16 head, written;

if (fast_vringh) {
@@ -216,10 +216,10 @@ static int parallel_test(u64 features,
written = 0;
goto complete;
} else {
- vringh_iov_init(&riov,
+ vringh_kiov_init(&riov,
host_riov,
ARRAY_SIZE(host_riov));
- vringh_iov_init(&wiov,
+ vringh_kiov_init(&wiov,
host_wiov,
ARRAY_SIZE(host_wiov));

@@ -442,8 +442,8 @@ int main(int argc, char *argv[])
struct virtqueue *vq;
struct vringh vrh;
struct scatterlist guest_sg[RINGSIZE], *sgs[2];
- struct iovec host_riov[2], host_wiov[2];
- struct vringh_iov riov, wiov;
+ struct kvec host_riov[2], host_wiov[2];
+ struct vringh_kiov riov, wiov;
struct vring_used_elem used[RINGSIZE];
char buf[28];
u16 head;
@@ -517,8 +517,8 @@ int main(int argc, char *argv[])
__kmalloc_fake = NULL;

/* Host retreives it. */
- vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
- vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
+ vringh_kiov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
+ vringh_kiov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
if (err != 1)
@@ -586,8 +586,8 @@ int main(int argc, char *argv[])
__kmalloc_fake = NULL;

/* Host picks it up (allocates new iov). */
- vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
- vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
+ vringh_kiov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
+ vringh_kiov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
if (err != 1)
@@ -613,8 +613,8 @@ int main(int argc, char *argv[])
assert(err < 3 || buf[2] == (char)(i + 2));
}
assert(riov.i == riov.used);
- vringh_iov_cleanup(&riov);
- vringh_iov_cleanup(&wiov);
+ vringh_kiov_cleanup(&riov);
+ vringh_kiov_cleanup(&wiov);

/* Complete using multi interface, just because we can. */
used[0].id = head;
@@ -638,8 +638,8 @@ int main(int argc, char *argv[])
}

/* Now get many, and consume them all at once. */
- vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
- vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
+ vringh_kiov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
+ vringh_kiov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

for (i = 0; i < RINGSIZE; i++) {
err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
@@ -723,8 +723,8 @@ int main(int argc, char *argv[])
d[5].flags = 0;

/* Host picks it up (allocates new iov). */
- vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
- vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
+ vringh_kiov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
+ vringh_kiov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
if (err != 1)
@@ -744,7 +744,7 @@ int main(int argc, char *argv[])
/* Data should be linear. */
for (i = 0; i < err; i++)
assert(buf[i] == i);
- vringh_iov_cleanup(&riov);
+ vringh_kiov_cleanup(&riov);
}

/* Don't leak memory... */
--
2.25.1

2022-12-27 03:02:53

by Shunsuke Mie

[permalink] [raw]
Subject: [RFC PATCH 6/9] caif_virtio: convert to new unified vringh APIs

The vringh_*_kern APIs, except for vringh_init_kern(), are being removed,
so change to use the new unified APIs.

Signed-off-by: Shunsuke Mie <[email protected]>
---
drivers/net/caif/caif_virtio.c | 26 ++++++++++----------------
1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 0b0f234b0b50..f9dd79807afa 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -265,18 +265,12 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
*/
if (riov->i == riov->used) {
if (cfv->ctx.head != USHRT_MAX) {
- vringh_complete_kern(cfv->vr_rx,
- cfv->ctx.head,
- 0);
+ vringh_complete(cfv->vr_rx, cfv->ctx.head, 0);
cfv->ctx.head = USHRT_MAX;
}

- err = vringh_getdesc_kern(
- cfv->vr_rx,
- riov,
- NULL,
- &cfv->ctx.head,
- GFP_ATOMIC);
+ err = vringh_getdesc(cfv->vr_rx, riov, NULL,
+ &cfv->ctx.head);

if (err <= 0)
goto exit;
@@ -317,9 +311,9 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)

/* Really out of packets? (stolen from virtio_net)*/
napi_complete(napi);
- if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
+ if (unlikely(!vringh_notify_enable(cfv->vr_rx)) &&
napi_schedule_prep(napi)) {
- vringh_notify_disable_kern(cfv->vr_rx);
+ vringh_notify_disable(cfv->vr_rx);
__napi_schedule(napi);
}
break;
@@ -329,7 +323,7 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
dev_kfree_skb(skb);
/* Stop NAPI poll on OOM, we hope to be polled later */
napi_complete(napi);
- vringh_notify_enable_kern(cfv->vr_rx);
+ vringh_notify_enable(cfv->vr_rx);
break;

default:
@@ -337,12 +331,12 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
netdev_warn(cfv->ndev, "Bad ring, disable device\n");
cfv->ndev->stats.rx_dropped = riov->used - riov->i;
napi_complete(napi);
- vringh_notify_disable_kern(cfv->vr_rx);
+ vringh_notify_disable(cfv->vr_rx);
netif_carrier_off(cfv->ndev);
break;
}
out:
- if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
+ if (rxcnt && vringh_need_notify(cfv->vr_rx) > 0)
vringh_notify(cfv->vr_rx);
return rxcnt;
}
@@ -352,7 +346,7 @@ static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
struct cfv_info *cfv = vdev->priv;

++cfv->stats.rx_kicks;
- vringh_notify_disable_kern(cfv->vr_rx);
+ vringh_notify_disable(cfv->vr_rx);
napi_schedule(&cfv->napi);
}

@@ -460,7 +454,7 @@ static int cfv_netdev_close(struct net_device *netdev)
/* Disable interrupts, queues and NAPI polling */
netif_carrier_off(netdev);
virtqueue_disable_cb(cfv->vq_tx);
- vringh_notify_disable_kern(cfv->vr_rx);
+ vringh_notify_disable(cfv->vr_rx);
napi_disable(&cfv->napi);

/* Release any TX buffers on both used and available rings */
--
2.25.1