From: Liu Song <[email protected]>

__viommu_sync_req() always returns 0, so the return value carries no
information. Change the function to return void and adjust its callers
accordingly.

Signed-off-by: Liu Song <[email protected]>
---
drivers/iommu/virtio-iommu.c | 23 ++++++-----------------
1 file changed, 6 insertions(+), 17 deletions(-)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b7c2280..fde5661 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -151,7 +151,7 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
* Wait for all added requests to complete. When this function returns, all
* requests that were in-flight at the time of the call have completed.
*/
-static int __viommu_sync_req(struct viommu_dev *viommu)
+static void __viommu_sync_req(struct viommu_dev *viommu)
{
unsigned int len;
size_t write_len;
@@ -180,22 +180,15 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
list_del(&req->list);
kfree(req);
}
-
- return 0;
}

-static int viommu_sync_req(struct viommu_dev *viommu)
+static void viommu_sync_req(struct viommu_dev *viommu)
{
- int ret;
unsigned long flags;

spin_lock_irqsave(&viommu->request_lock, flags);
- ret = __viommu_sync_req(viommu);
- if (ret)
- dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
+ __viommu_sync_req(viommu);
spin_unlock_irqrestore(&viommu->request_lock, flags);
-
- return ret;
}

/*
@@ -247,8 +240,8 @@ static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
if (ret == -ENOSPC) {
/* If the queue is full, sync and retry */
- if (!__viommu_sync_req(viommu))
- ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
+ __viommu_sync_req(viommu);
+ ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
}
if (ret)
goto err_free;
@@ -293,11 +286,7 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
goto out_unlock;
}

- ret = __viommu_sync_req(viommu);
- if (ret) {
- dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
- /* Fall-through (get the actual request status) */
- }
+ __viommu_sync_req(viommu);

ret = viommu_get_req_errno(buf, len);
out_unlock:
--
1.8.3.1

Hi Liu,
On Thu, Sep 22, 2022 at 07:24:46PM +0800, Liu Song wrote:
> From: Liu Song <[email protected]>
>
> In "__viommu_sync_req", 0 is always returned as the only return value, no
> return value is needed for this case, and the processes and functions
> involved are adjusted accordingly.
>
> Signed-off-by: Liu Song <[email protected]>

Thanks for the patch, but I'd rather improve __viommu_sync_req() to handle
more errors. At the moment, if the virtqueue breaks, the function spins
indefinitely waiting for a host response. We should at least check the
return value of virtqueue_kick(), and maybe add a timeout as well, although
I'm not sure which time base we can use reliably here.
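
Roughly something like the sketch below. This is completely untested, and
returning -EPIPE for a broken queue plus the virtqueue_is_broken() poll are
only assumptions about one possible shape:

static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	/* virtqueue_kick() returns false when the queue is broken */
	if (!virtqueue_kick(vq))
		return -EPIPE;

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req) {
			/* Give up instead of spinning on a dead queue */
			if (virtqueue_is_broken(vq))
				return -EPIPE;
			continue;
		}

		/* ... existing completion handling, list_del() and kfree() ... */
	}

	return 0;
}

A timeout would also bound this loop, but as said above I'm not sure which
time base is reliable in this context.
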
Thanks,
Jean

> ---
> drivers/iommu/virtio-iommu.c | 23 ++++++-----------------
> 1 file changed, 6 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index b7c2280..fde5661 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -151,7 +151,7 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
> * Wait for all added requests to complete. When this function returns, all
> * requests that were in-flight at the time of the call have completed.
> */
> -static int __viommu_sync_req(struct viommu_dev *viommu)
> +static void __viommu_sync_req(struct viommu_dev *viommu)
> {
> unsigned int len;
> size_t write_len;
> @@ -180,22 +180,15 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
> list_del(&req->list);
> kfree(req);
> }
> -
> - return 0;
> }
>
> -static int viommu_sync_req(struct viommu_dev *viommu)
> +static void viommu_sync_req(struct viommu_dev *viommu)
> {
> - int ret;
> unsigned long flags;
>
> spin_lock_irqsave(&viommu->request_lock, flags);
> - ret = __viommu_sync_req(viommu);
> - if (ret)
> - dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> + __viommu_sync_req(viommu);
> spin_unlock_irqrestore(&viommu->request_lock, flags);
> -
> - return ret;
> }
>
> /*
> @@ -247,8 +240,8 @@ static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
> ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> if (ret == -ENOSPC) {
> /* If the queue is full, sync and retry */
> - if (!__viommu_sync_req(viommu))
> - ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> + __viommu_sync_req(viommu);
> + ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> }
> if (ret)
> goto err_free;
> @@ -293,11 +286,7 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
> goto out_unlock;
> }
>
> - ret = __viommu_sync_req(viommu);
> - if (ret) {
> - dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> - /* Fall-through (get the actual request status) */
> - }
> + __viommu_sync_req(viommu);
>
> ret = viommu_get_req_errno(buf, len);
> out_unlock:
> --
> 1.8.3.1
>