2020-12-10 07:48:35

by Keqian Zhu

Subject: [PATCH 0/7] vfio: iommu_type1: Some fixes and optimization

Hi folks,

This patch series aims to fix up or optimize some code related to vfio
dirty log tracking.

patch 1: Optimize the dirty log when unwinding pinned pages.
patch 2-3: Optimize promotion of pinned_page_dirty_scope.
patch 4: Fix a missing dirty log when promoting pinned_page_dirty_scope.
patch 5-7: Drop the superfluous "pgsize" parameter from some functions.

Hopefully they improve the robustness of vfio dirty log tracking.
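
For context on what pinned_page_dirty_scope buys us: once every vfio_group
limits its dirty scope to explicitly pinned pages, dirty log reporting can
be narrowed to the per-mapping pin bitmap; otherwise the whole range has to
be treated as dirty. A rough sketch of that decision follows (a simplified
illustration only, not the in-tree code; all names are made up):

#include <stdbool.h>

/* Sketch: how the dirty-scope flag narrows what is reported as dirty. */
struct sketch_dma {
	unsigned long npages;
	unsigned long *pin_bitmap;	/* bit set => page was pinned (and so dirtied) */
};

static void sketch_report_dirty(bool pinned_page_dirty_scope,
				const struct sketch_dma *dma,
				unsigned long *out_bitmap)
{
	unsigned long i;

	for (i = 0; i < dma->npages; i++) {
		bool dirty = pinned_page_dirty_scope ?
			     !!(dma->pin_bitmap[i / 64] & (1UL << (i % 64))) :
			     true;	/* no scope info: assume every page is dirty */

		if (dirty)
			out_bitmap[i / 64] |= 1UL << (i % 64);
	}
}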

Thanks,
Keqian

Keqian Zhu (7):
vfio: iommu_type1: Clear added dirty bit when unwind pin
vfio: iommu_type1: Initially set the pinned_page_dirty_scope
vfio: iommu_type1: Make an explicit "promote" semantic
vfio: iommu_type1: Fix missing dirty page when promote pinned_scope
vfio: iommu_type1: Drop parameter "pgsize" of
vfio_dma_bitmap_alloc_all
vfio: iommu_type1: Drop parameter "pgsize" of vfio_iova_dirty_bitmap.
vfio: iommu_type1: Drop parameter "pgsize" of update_user_bitmap

drivers/vfio/vfio_iommu_type1.c | 108 +++++++++++++++++++-------------
1 file changed, 65 insertions(+), 43 deletions(-)

--
2.23.0


2020-12-10 14:07:21

by Keqian Zhu

Subject: [PATCH 3/7] vfio: iommu_type1: Make an explicit "promote" semantic

When we want to promote the pinned_page_dirty_scope of a vfio_iommu,
we have to call the "update" function to walk all vfio_groups, but
when we want to downgrade it, we can set the flag directly.

Given the above, we can give that function an explicit "promote"
semantic. Besides, if the vfio_iommu has already been promoted, the
function can return early.
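
To illustrate the asymmetry (a minimal sketch, not the kernel code; the
structures and helpers below are made up for illustration): downgrading
only needs a direct flag write, while promoting has to confirm that every
group has limited its dirty scope, which is what the rewritten function
in the diff does.

#include <stdbool.h>

struct sketch_group {
	bool pinned_page_dirty_scope;
	struct sketch_group *next;
};

struct sketch_iommu {
	bool pinned_page_dirty_scope;
	struct sketch_group *groups;
};

/* Downgrade: one group losing scope is enough, so write the flag directly. */
static void sketch_downgrade(struct sketch_iommu *iommu)
{
	iommu->pinned_page_dirty_scope = false;
}

/* Promote: only valid if *all* groups limit their scope, so walk them all. */
static void sketch_promote(struct sketch_iommu *iommu)
{
	struct sketch_group *g;

	if (iommu->pinned_page_dirty_scope)
		return;			/* already promoted, nothing to check */

	for (g = iommu->groups; g; g = g->next)
		if (!g->pinned_page_dirty_scope)
			return;		/* one unconstrained group blocks promotion */

	iommu->pinned_page_dirty_scope = true;
}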

Signed-off-by: Keqian Zhu <[email protected]>
---
drivers/vfio/vfio_iommu_type1.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c52bcefba96b..bd9a94590ebc 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -148,7 +148,7 @@ static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group);

-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -719,7 +719,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
if (!group->pinned_page_dirty_scope) {
group->pinned_page_dirty_scope = true;
- update_pinned_page_dirty_scope(iommu);
+ promote_pinned_page_dirty_scope(iommu);
}

goto pin_done;
@@ -1633,27 +1633,26 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}

-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
struct vfio_group *group;

+ if (iommu->pinned_page_dirty_scope)
+ return;
+
list_for_each_entry(domain, &iommu->domain_list, next) {
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}

if (iommu->external_domain) {
domain = iommu->external_domain;
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}

@@ -2348,7 +2347,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
struct vfio_group *group;
- bool update_dirty_scope = false;
+ bool promote_dirty_scope = false;
LIST_HEAD(iova_copy);

mutex_lock(&iommu->lock);
@@ -2356,7 +2355,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);

@@ -2386,7 +2385,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
continue;

vfio_iommu_detach_group(domain, group);
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
/*
@@ -2422,8 +2421,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
* Removal of a group without dirty tracking may allow the iommu scope
* to be promoted.
*/
- if (update_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ if (promote_dirty_scope)
+ promote_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
}

--
2.23.0