2021-01-07 04:46:50

by Keqian Zhu

Subject: [PATCH 1/6] vfio/iommu_type1: Make an explicit "promote" semantic

When we want to promote the pinned_page_dirty_scope of a vfio_iommu,
we call the "update" function, which visits every vfio_group; but when
we want to downgrade it, we simply set the flag to false directly.

So give the "update" function an explicit "promote" semantic. As a
bonus, return early if the vfio_iommu has already been promoted.

Signed-off-by: Keqian Zhu <[email protected]>
---
drivers/vfio/vfio_iommu_type1.c | 30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4dedaa9128..334a8240e1da 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -148,7 +148,7 @@ static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group);

-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -714,7 +714,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
if (!group->pinned_page_dirty_scope) {
group->pinned_page_dirty_scope = true;
- update_pinned_page_dirty_scope(iommu);
+ promote_pinned_page_dirty_scope(iommu);
}

goto pin_done;
@@ -1622,27 +1622,26 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}

-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
struct vfio_group *group;

+ if (iommu->pinned_page_dirty_scope)
+ return;
+
list_for_each_entry(domain, &iommu->domain_list, next) {
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}

if (iommu->external_domain) {
domain = iommu->external_domain;
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}

@@ -2057,8 +2056,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
* addition of a dirty tracking group.
*/
group->pinned_page_dirty_scope = true;
- if (!iommu->pinned_page_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ promote_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);

return 0;
@@ -2341,7 +2339,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
struct vfio_group *group;
- bool update_dirty_scope = false;
+ bool promote_dirty_scope = false;
LIST_HEAD(iova_copy);

mutex_lock(&iommu->lock);
@@ -2349,7 +2347,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);

@@ -2379,7 +2377,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
continue;

vfio_iommu_detach_group(domain, group);
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
/*
@@ -2415,8 +2413,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
* Removal of a group without dirty tracking may allow the iommu scope
* to be promoted.
*/
- if (update_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ if (promote_dirty_scope)
+ promote_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
}

--
2.19.1


2021-01-15 22:45:33

by Alex Williamson

Subject: Re: [PATCH 1/6] vfio/iommu_type1: Make an explicit "promote" semantic

On Thu, 7 Jan 2021 12:43:56 +0800
Keqian Zhu <[email protected]> wrote:

> When we want to promote the pinned_page_dirty_scope of a vfio_iommu,
> we call the "update" function, which visits every vfio_group; but when
> we want to downgrade it, we simply set the flag to false directly.

I agree that the transition can only go in one direction, but it's
still conditional on the scope of all groups involved. We are
"updating" the iommu state based on the change of a group. Renaming
this to "promote" seems like a matter of personal preference.

> So give the "update" function an explicit "promote" semantic. As a
> bonus, return early if the vfio_iommu has already been promoted.

Currently it's the caller that avoids using this function when the
iommu scope is already correct. In fact the change induces a
redundant test in the pin_pages code path: we're changing a group from
non-pinned-page scope to pinned-page scope, therefore the iommu scope
cannot initially be scope limited. In the attach_group call path,
we're moving that test from the caller, so at best we've introduced an
additional function call.
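
To make that argument concrete, here is a small standalone C model of
the pin_pages path (the names below are simplified stand-ins, not the
kernel API); it only illustrates why the new early return in the
"promote" helper can never fire on this path:

#include <assert.h>
#include <stdbool.h>

struct toy_group {
	bool pinned_scope;
};

struct toy_iommu {
	bool pinned_scope;
	struct toy_group *only_group;	/* single group keeps the model tiny */
};

static void toy_promote(struct toy_iommu *iommu)
{
	if (iommu->pinned_scope)	/* the early return added by this patch */
		return;
	if (iommu->only_group->pinned_scope)	/* "walk" over all (one) groups */
		iommu->pinned_scope = true;
}

static void toy_pin_pages(struct toy_iommu *iommu, struct toy_group *group)
{
	if (!group->pinned_scope) {
		/*
		 * This group is not yet pinned-scope, so the iommu-wide
		 * flag cannot already be true: the early return in
		 * toy_promote() is unreachable from here.
		 */
		assert(!iommu->pinned_scope);
		group->pinned_scope = true;
		toy_promote(iommu);
	}
}

int main(void)
{
	struct toy_group group = { .pinned_scope = false };
	struct toy_iommu iommu = { .pinned_scope = false, .only_group = &group };

	toy_pin_pages(&iommu, &group);
	assert(iommu.pinned_scope);
	return 0;
}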

The function as it exists today is also more versatile, whereas the
"promote" version here forces it into a single task with no appreciable
difference in complexity or code. This seems like a frivolous change.
Thanks,

Alex


2021-01-18 13:40:33

by Keqian Zhu

Subject: Re: [PATCH 1/6] vfio/iommu_type1: Make an explicit "promote" semantic



On 2021/1/16 6:42, Alex Williamson wrote:
> On Thu, 7 Jan 2021 12:43:56 +0800
> Keqian Zhu <[email protected]> wrote:
>
>> When we want to promote the pinned_page_dirty_scope of a vfio_iommu,
>> we call the "update" function, which visits every vfio_group; but when
>> we want to downgrade it, we simply set the flag to false directly.
>
> I agree that the transition can only go in one direction, but it's
> still conditional on the scope of all groups involved. We are
> "updating" the iommu state based on the change of a group. Renaming
> this to "promote" seems like a matter of personal preference.
>
>> So give the "update" function an explicit "promote" semantic. As a
>> bonus, return early if the vfio_iommu has already been promoted.
>
> Currently it's the caller that avoids using this function when the
> iommu scope is already correct. In fact the change induces a
> redundant test in the pin_pages code path: we're changing a group from
> non-pinned-page scope to pinned-page scope, therefore the iommu scope
> cannot initially be scope limited. In the attach_group call path,
> we're moving that test from the caller, so at best we've introduced an
> additional function call.
>
> The function as it exists today is also more versatile, whereas the
> "promote" version here forces it into a single task with no appreciable
> difference in complexity or code. This seems like a frivolous change.
> Thanks,
OK, I will adopt your idea of maintaining a counter of non-pinned groups.
Then we keep the "update" semantic, and the target becomes the counter ;-).
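
A rough, self-contained sketch of what such a counter-based "update"
could look like (the struct and field names below are purely
illustrative and are not claimed to match the eventual kernel patch):

#include <stdbool.h>
#include <stdio.h>

struct toy_iommu {
	unsigned long num_non_pinned_groups;	/* groups without pinned-page scope */
};

/* Dirty scope is limited to pinned pages iff no non-pinned group remains. */
static bool toy_scope_is_pinned_only(const struct toy_iommu *iommu)
{
	return iommu->num_non_pinned_groups == 0;
}

/* A group without pinned-page dirty scope is attached. */
static void toy_attach_non_pinned_group(struct toy_iommu *iommu)
{
	iommu->num_non_pinned_groups++;
}

/* A non-pinned group either gains pinned-page scope or is detached. */
static void toy_drop_non_pinned_group(struct toy_iommu *iommu)
{
	iommu->num_non_pinned_groups--;
}

int main(void)
{
	struct toy_iommu iommu = { .num_non_pinned_groups = 0 };

	toy_attach_non_pinned_group(&iommu);
	printf("pinned-page scope: %d\n", toy_scope_is_pinned_only(&iommu));	/* 0 */

	toy_drop_non_pinned_group(&iommu);
	printf("pinned-page scope: %d\n", toy_scope_is_pinned_only(&iommu));	/* 1 */

	return 0;
}

With a counter, both promotion and demotion become O(1) updates instead
of a walk over every group in every domain.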

Thanks,
Keqian
