RFC only. Not tested on vdpa-sim-blk with user virtual addresses;
works fine with vdpa-sim-net, which uses physical addresses to map.

Implement the .reset_map() op, which restores the iotlb of the given
ASID to the default 1:1 passthrough mapping, and set up that passthrough
mapping once at device creation time rather than on every device reset.
This patch is based on top of [1].
[1] https://lore.kernel.org/virtualization/[email protected]/
Signed-off-by: Si-Wei Liu <[email protected]>
---
RFC v2:
- initialize iotlb to passthrough mode in device add
---
drivers/vdpa/vdpa_sim/vdpa_sim.c | 34 ++++++++++++++++++++++++--------
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 76d41058add9..2a0a6042d61d 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -151,13 +151,6 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
&vdpasim->iommu_lock);
}
- for (i = 0; i < vdpasim->dev_attr.nas; i++) {
- vhost_iotlb_reset(&vdpasim->iommu[i]);
- vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
- 0, VHOST_MAP_RW);
- vdpasim->iommu_pt[i] = true;
- }
-
vdpasim->running = true;
spin_unlock(&vdpasim->iommu_lock);
@@ -259,8 +252,12 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
if (!vdpasim->iommu_pt)
goto err_iommu;
- for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ for (i = 0; i < vdpasim->dev_attr.nas; i++) {
vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
+ vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
+ VHOST_MAP_RW);
+ vdpasim->iommu_pt[i] = true;
+ }
for (i = 0; i < dev_attr->nvqs; i++)
vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
@@ -637,6 +634,25 @@ static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
return ret;
}
+static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (asid >= vdpasim->dev_attr.nas)
+ return -EINVAL;
+
+ spin_lock(&vdpasim->iommu_lock);
+ if (vdpasim->iommu_pt[asid])
+ goto out;
+ vhost_iotlb_reset(&vdpasim->iommu[asid]);
+ vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
+ 0, VHOST_MAP_RW);
+ vdpasim->iommu_pt[asid] = true;
+out:
+ spin_unlock(&vdpasim->iommu_lock);
+ return 0;
+}
+
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -759,6 +775,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_group_asid = vdpasim_set_group_asid,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
+ .reset_map = vdpasim_reset_map,
.bind_mm = vdpasim_bind_mm,
.unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
@@ -796,6 +813,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_iova_range = vdpasim_get_iova_range,
.set_group_asid = vdpasim_set_group_asid,
.set_map = vdpasim_set_map,
+ .reset_map = vdpasim_reset_map,
.bind_mm = vdpasim_bind_mm,
.unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
--
2.39.3
On Tue, Oct 17, 2023 at 10:11:33PM -0700, Si-Wei Liu wrote:
>[...]
>---
>RFC v2:
> - initialize iotlb to passthrough mode in device add
I tested this version and I didn't see any issue ;-)
Tested-by: Stefano Garzarella <[email protected]>
>[...]
On 10/18/2023 1:05 AM, Stefano Garzarella wrote:
> On Tue, Oct 17, 2023 at 10:11:33PM -0700, Si-Wei Liu wrote:
>> [...]
>> ---
>> RFC v2:
>> - initialize iotlb to passthrough mode in device add
>
> I tested this version and I didn't see any issue ;-)
Great, thank you so much for your help testing my patch, Stefano!
Just for my own interest/curiosity, currently there's no vhost-vdpa
backend client implemented for vdpa-sim-blk, or for any vdpa block
device in userspace, correct? So there was no test specific to
vhost-vdpa that needed to be exercised, right?
Thanks,
-Siwei
>
> Tested-by: Stefano Garzarella <[email protected]>
>
>> [...]
On Wed, Oct 18, 2023 at 04:47:48PM -0700, Si-Wei Liu wrote:
>
>
>On 10/18/2023 1:05 AM, Stefano Garzarella wrote:
>>On Tue, Oct 17, 2023 at 10:11:33PM -0700, Si-Wei Liu wrote:
>>>[...]
>>>---
>>>RFC v2:
>>> - initialize iotlb to passthrough mode in device add
>>
>>I tested this version and I didn't see any issue ;-)
>Great, thank you so much for your help testing my patch, Stefano!
You're welcome :-)
>Just for my own interest/curiosity, currently there's no vhost-vdpa
>backend client implemented for vdpa-sim-blk
Yep, we developed libblkio [1]. libblkio exposes a common API to access
block devices in userspace, and it supports several drivers.
The one useful for this use case is `virtio-blk-vhost-vdpa`. Here [2]
are some examples of how to use the libblkio test suite with
vdpa-sim-blk.
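For example, opening a vdpa-sim-blk device through the
`virtio-blk-vhost-vdpa` driver with the libblkio C API looks roughly
like this. This is a minimal sketch from memory, untested; the "path"
property name and the /dev/vhost-vdpa-0 device node are assumptions, so
please double-check the libblkio docs:

    #include <inttypes.h>
    #include <stdio.h>
    #include <blkio.h>

    int main(void)
    {
        struct blkio *b;
        uint64_t capacity;

        /* bind an instance to the virtio-blk-vhost-vdpa driver */
        if (blkio_create("virtio-blk-vhost-vdpa", &b) < 0)
            return 1;

        /* point it at the vhost-vdpa chardev of the vdpa-sim-blk device */
        blkio_set_str(b, "path", "/dev/vhost-vdpa-0");

        if (blkio_connect(b) < 0 || blkio_start(b) < 0) {
            blkio_destroy(&b);
            return 1;
        }

        /* read back a device property, e.g. the disk capacity */
        blkio_get_uint64(b, "capacity", &capacity);
        printf("capacity: %" PRIu64 " bytes\n", capacity);

        blkio_destroy(&b);
        return 0;
    }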
QEMU supports libblkio drivers since version 7.2, so you can use the
following options to attach a vdpa-blk device to a VM:
-blockdev node-name=drive_src1,driver=virtio-blk-vhost-vdpa,path=/dev/vhost-vdpa-0,cache.direct=on \
-device virtio-blk-pci,id=src1,bootindex=2,drive=drive_src1 \
For now only what we call the slow path [3][4] is supported: the VQs
are not directly exposed to the guest; instead, QEMU allocates its own
VQs (similar to shadow VQs for net) to support live migration and QEMU
storage features. The fast path is on the agenda, but paused for now.
>or for any vdpa block device in userspace, correct?
Do you mean with VDUSE?
In that case, yes: qemu-storage-daemon supports it and can implement a
virtio-blk device in user space, exposing a disk image through VDUSE.
There is an example in libblkio as well [5] on how to start it.
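FWIW, starting such a VDUSE export with qemu-storage-daemon looks
roughly like this (a sketch from memory, untested; the exact export
options and the final `vdpa` step may differ between QEMU and iproute2
versions, so please double-check the docs):

    qemu-storage-daemon \
      --blockdev file,filename=disk.img,node-name=disk0 \
      --export type=vduse-blk,id=vduse0,name=vduse0,node-name=disk0,writable=on

    # then instantiate the vdpa device backed by the VDUSE daemon
    vdpa dev add name vduse0 mgmtdev vduse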
>So there was no test specific to vhost-vdpa that needed to be exercised,
>right?
>
I hope I answered above :-)
This reminded me that I need to write a blog post with all this
information; I hope to do that soon!
Stefano
[1] https://gitlab.com/libblkio/libblkio
[2] https://gitlab.com/libblkio/libblkio/-/blob/main/tests/meson.build?ref_type=heads#L42
[3] https://kvmforum2022.sched.com/event/15jK5/qemu-storage-daemon-and-libblkio-exploring-new-shores-for-the-qemu-block-layer-kevin-wolf-stefano-garzarella-red-hat
[4] https://kvmforum2021.sched.com/event/ke3a/vdpa-blk-unified-hardware-and-software-offload-for-virtio-blk-stefano-garzarella-red-hat
[5] https://gitlab.com/libblkio/libblkio/-/blob/main/tests/meson.build?ref_type=heads#L58
On 10/19/2023 2:29 AM, Stefano Garzarella wrote:
> [...]
>
> I hope I answered above :-)
Definitely! This is exactly what I needed, it's really useful! Much
appreciated, thanks for the detailed information!
I wasn't aware of the latest status of the libblkio drivers and QEMU
support since I last checked (at some point right after KVM Forum 2022;
sorry, my knowledge was too outdated). I followed the links you shared
and checked a few things; it looks like my change shouldn't affect
anything. Good to see all the desired pieces have landed in QEMU and
libblkio as planned, great job!
Cheers,
-Siwei