We always use the smallest supported page size of vfio_iommu as the
pgsize, so remove the "pgsize" parameter from vfio_iova_dirty_bitmap().

Signed-off-by: Keqian Zhu <[email protected]>
---
drivers/vfio/vfio_iommu_type1.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 080c05b129ee..82649a040148 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1015,11 +1015,12 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
}

static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
- dma_addr_t iova, size_t size, size_t pgsize)
+ dma_addr_t iova, size_t size)
{
struct vfio_dma *dma;
struct rb_node *n;
- unsigned long pgshift = __ffs(pgsize);
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+ size_t pgsize = (size_t)1 << pgshift;
int ret;

/*
@@ -2824,8 +2825,7 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
if (iommu->dirty_page_tracking)
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
iommu, range.iova,
- range.size,
- range.bitmap.pgsize);
+ range.size);
else
ret = -EINVAL;
out_unlock:
--
2.19.1
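For reference, a minimal user-space analogue of the derivation the patch
moves inside vfio_iova_dirty_bitmap(). This is a sketch only: it uses
__builtin_ctzl() in place of the kernel's __ffs(), and the pgsize_bitmap
value is a made-up example, not taken from any real IOMMU.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Example only: a pgsize_bitmap advertising 4K, 2M and 1G pages. */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* __builtin_ctzl() stands in for the kernel's __ffs(): both return
	 * the index of the least significant set bit, i.e. the smallest
	 * supported page size. */
	unsigned long pgshift = (unsigned long)__builtin_ctzl(pgsize_bitmap);
	size_t pgsize = (size_t)1 << pgshift;

	printf("pgshift = %lu, pgsize = %zu\n", pgshift, pgsize);
	return 0;
}

Built with any C compiler, this prints pgshift = 12, pgsize = 4096 for the
example bitmap, which is the same value the patched function derives from
iommu->pgsize_bitmap.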
On Thu, 7 Jan 2021 12:44:00 +0800
Keqian Zhu <[email protected]> wrote:
> We always use the smallest supported page size of vfio_iommu as the
> pgsize, so remove the "pgsize" parameter from vfio_iova_dirty_bitmap().
>
> Signed-off-by: Keqian Zhu <[email protected]>
> ---
> drivers/vfio/vfio_iommu_type1.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 080c05b129ee..82649a040148 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -1015,11 +1015,12 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
> }
>
> static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
> - dma_addr_t iova, size_t size, size_t pgsize)
> + dma_addr_t iova, size_t size)
> {
> struct vfio_dma *dma;
> struct rb_node *n;
> - unsigned long pgshift = __ffs(pgsize);
> + unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
> + size_t pgsize = (size_t)1 << pgshift;
> int ret;
>
> /*
> @@ -2824,8 +2825,7 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
> if (iommu->dirty_page_tracking)
> ret = vfio_iova_dirty_bitmap(range.bitmap.data,
> iommu, range.iova,
> - range.size,
> - range.bitmap.pgsize);
> + range.size);
> else
> ret = -EINVAL;
> out_unlock:
In this case the caller has actually already calculated both pgsize and
pgshift; the better optimization would be to pass both rather than
recalculate them. Thanks,
Alex
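As a rough illustration of that suggestion (an untested sketch, not a
revision actually posted in this thread; the caller-side locals pgsize and
pgshift are assumed names, since the quoted context does not show them),
the prototype and call site could become:

static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
				  dma_addr_t iova, size_t size,
				  size_t pgsize, unsigned long pgshift);

	/* At the call site, reuse the pgsize/pgshift values the caller has
	 * already computed (per Alex's note) instead of rederiving them
	 * from iommu->pgsize_bitmap inside the callee. */
	if (iommu->dirty_page_tracking)
		ret = vfio_iova_dirty_bitmap(range.bitmap.data,
					     iommu, range.iova, range.size,
					     pgsize, pgshift);
	else
		ret = -EINVAL;

Either way the ioctl behaviour is unchanged; the only question is whether
the shift is derived once in the caller or rederived in the callee.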