2021-03-01 12:19:19

by John Garry

[permalink] [raw]
Subject: [PATCH 0/3] iommu/iova: Add CPU hotplug handler to flush rcaches to core code

The Intel IOMMU driver supports flushing the per-CPU rcaches when a CPU is
offlined.

Let's move it to core code, so everyone can take advantage.

Also correct a code comment.

Based on v5.12-rc1. Tested on arm64 only.

John Garry (3):
iova: Add CPU hotplug handler to flush rcaches
iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining
iova: Correct comment for free_cpu_cached_iovas()

drivers/iommu/intel/iommu.c | 31 -------------------------------
drivers/iommu/iova.c | 32 ++++++++++++++++++++++++++++++--
include/linux/cpuhotplug.h | 2 +-
include/linux/iova.h | 1 +
4 files changed, 32 insertions(+), 34 deletions(-)

--
2.26.2


2021-03-01 12:19:33

by John Garry

[permalink] [raw]
Subject: [PATCH 1/3] iova: Add CPU hotplug handler to flush rcaches

Like the Intel IOMMU driver already does, flush the per-IOVA domain
CPU rcache when a CPU goes offline - there's no point in keeping it.

Signed-off-by: John Garry <[email protected]>
---
drivers/iommu/iova.c | 30 +++++++++++++++++++++++++++++-
include/linux/cpuhotplug.h | 1 +
include/linux/iova.h | 1 +
3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e6e2fa85271c..c78312560425 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
static void free_global_cached_iovas(struct iova_domain *iovad);

void
@@ -51,6 +62,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+ cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -257,10 +269,21 @@ int iova_cache_get(void)
{
mutex_lock(&iova_cache_mutex);
if (!iova_cache_users) {
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
+ iova_cpuhp_dead);
+ if (ret) {
+ mutex_unlock(&iova_cache_mutex);
+ pr_err("Couldn't register cpuhp handler\n");
+ return ret;
+ }
+
iova_cache = kmem_cache_create(
"iommu_iova", sizeof(struct iova), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
mutex_unlock(&iova_cache_mutex);
pr_err("Couldn't create iova cache\n");
return -ENOMEM;
@@ -282,8 +305,10 @@ void iova_cache_put(void)
return;
}
iova_cache_users--;
- if (!iova_cache_users)
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
kmem_cache_destroy(iova_cache);
+ }
mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
@@ -606,6 +631,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;

+ cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+ &iovad->cpuhp_dead);
+
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index f14adb882338..cedac9986557 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -58,6 +58,7 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
+ CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
diff --git a/include/linux/iova.h b/include/linux/iova.h
index c834c01c0a5b..4be6c0ab4997 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -95,6 +95,7 @@ struct iova_domain {
flush-queues */
atomic_t fq_timer_on; /* 1 when timer is active, 0
when not */
+ struct hlist_node cpuhp_dead;
};

static inline unsigned long iova_size(struct iova *iova)
--
2.26.2

2021-03-01 12:20:53

by John Garry

[permalink] [raw]
Subject: [PATCH 2/3] iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining

Now that the core code handles flushing per-IOVA domain CPU rcaches,
remove the handling here.

Signed-off-by: John Garry <[email protected]>
---
drivers/iommu/intel/iommu.c | 31 -------------------------------
include/linux/cpuhotplug.h | 1 -
2 files changed, 32 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ee0932307d64..d1e66e1b07b8 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4065,35 +4065,6 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};

-static void free_all_cpu_cached_iovas(unsigned int cpu)
-{
- int i;
-
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct dmar_domain *domain;
- int did;
-
- if (!iommu)
- continue;
-
- for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, (u16)did);
-
- if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
- continue;
-
- iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
- }
- }
-}
-
-static int intel_iommu_cpu_dead(unsigned int cpu)
-{
- free_all_cpu_cached_iovas(cpu);
- return 0;
-}
-
static void intel_disable_iommus(void)
{
struct intel_iommu *iommu = NULL;
@@ -4388,8 +4359,6 @@ int __init intel_iommu_init(void)
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
- intel_iommu_cpu_dead);

down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index cedac9986557..85996494bec1 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -57,7 +57,6 @@ enum cpuhp_state {
CPUHP_PAGE_ALLOC_DEAD,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
- CPUHP_IOMMU_INTEL_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
--
2.26.2

2021-03-03 02:50:54

by John Garry

[permalink] [raw]
Subject: [PATCH 3/3] iova: Correct comment for free_cpu_cached_iovas()

Function free_cpu_cached_iovas() is not only called when a CPU is
hotplugged, so remove that part of the code comment.

Signed-off-by: John Garry <[email protected]>
---
drivers/iommu/iova.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index c78312560425..465b3b0eeeb0 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -996,7 +996,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
}

/*
- * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
+ * free all the IOVA ranges cached by a cpu
*/
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
--
2.26.2

2021-03-22 18:00:01

by John Garry

[permalink] [raw]
Subject: Re: [PATCH 0/3] iommu/iova: Add CPU hotplug handler to flush rcaches to core code

On 01/03/2021 12:12, John Garry wrote:
> The Intel IOMMU driver supports flushing the per-CPU rcaches when a CPU is
> offlined.
>
> Let's move it to core code, so everyone can take advantage.
>
> Also correct a code comment.
>
> Based on v5.12-rc1. Tested on arm64 only.

Hi guys,

Friendly reminder ...

Thanks
John

>
> John Garry (3):
> iova: Add CPU hotplug handler to flush rcaches
> iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining
> iova: Correct comment for free_cpu_cached_iovas()
>
> drivers/iommu/intel/iommu.c | 31 -------------------------------
> drivers/iommu/iova.c | 32 ++++++++++++++++++++++++++++++--
> include/linux/cpuhotplug.h | 2 +-
> include/linux/iova.h | 1 +
> 4 files changed, 32 insertions(+), 34 deletions(-)
>

2021-03-23 02:10:10

by Baolu Lu

[permalink] [raw]
Subject: Re: [PATCH 2/3] iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining

On 3/1/21 8:12 PM, John Garry wrote:
> Now that the core code handles flushing per-IOVA domain CPU rcaches,
> remove the handling here.
>
> Signed-off-by: John Garry <[email protected]>
> ---
> drivers/iommu/intel/iommu.c | 31 -------------------------------
> include/linux/cpuhotplug.h | 1 -
> 2 files changed, 32 deletions(-)
>
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index ee0932307d64..d1e66e1b07b8 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -4065,35 +4065,6 @@ static struct notifier_block intel_iommu_memory_nb = {
> .priority = 0
> };
>
> -static void free_all_cpu_cached_iovas(unsigned int cpu)
> -{
> - int i;
> -
> - for (i = 0; i < g_num_of_iommus; i++) {
> - struct intel_iommu *iommu = g_iommus[i];
> - struct dmar_domain *domain;
> - int did;
> -
> - if (!iommu)
> - continue;
> -
> - for (did = 0; did < cap_ndoms(iommu->cap); did++) {
> - domain = get_iommu_domain(iommu, (u16)did);
> -
> - if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
> - continue;
> -
> - iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
> - }
> - }
> -}
> -
> -static int intel_iommu_cpu_dead(unsigned int cpu)
> -{
> - free_all_cpu_cached_iovas(cpu);
> - return 0;
> -}
> -
> static void intel_disable_iommus(void)
> {
> struct intel_iommu *iommu = NULL;
> @@ -4388,8 +4359,6 @@ int __init intel_iommu_init(void)
> bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
> if (si_domain && !hw_pass_through)
> register_memory_notifier(&intel_iommu_memory_nb);
> - cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
> - intel_iommu_cpu_dead);
>
> down_read(&dmar_global_lock);
> if (probe_acpi_namespace_devices())
> diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
> index cedac9986557..85996494bec1 100644
> --- a/include/linux/cpuhotplug.h
> +++ b/include/linux/cpuhotplug.h
> @@ -57,7 +57,6 @@ enum cpuhp_state {
> CPUHP_PAGE_ALLOC_DEAD,
> CPUHP_NET_DEV_DEAD,
> CPUHP_PCI_XGENE_DEAD,
> - CPUHP_IOMMU_INTEL_DEAD,
> CPUHP_IOMMU_IOVA_DEAD,
> CPUHP_LUSTRE_CFS_DEAD,
> CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
>

Reviewed-by: Lu Baolu <[email protected]>

Best regards,
baolu

2021-03-23 13:07:22

by Robin Murphy

[permalink] [raw]
Subject: Re: [PATCH 3/3] iova: Correct comment for free_cpu_cached_iovas()

On 2021-03-01 12:12, John Garry wrote:
> Function free_cpu_cached_iovas() is not only called when a CPU is
> hotplugged, so remove that part of the code comment.

FWIW I read it as clarifying why this is broken out into a separate
function vs. a monolithic "free all cached IOVAs" routine that handles
both the per-cpu and global caches - it never said "*only* used..."

As such I'd hesitate to call it incorrect, but it's certainly arguable
whether it needs to be stated or not, especially once the hotplug
callsite is now obvious in the same file - on which note the function
itself also shouldn't need to be public any more, no?

Robin.

> Signed-off-by: John Garry <[email protected]>
> ---
> drivers/iommu/iova.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index c78312560425..465b3b0eeeb0 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -996,7 +996,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
> }
>
> /*
> - * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
> + * free all the IOVA ranges cached by a cpu
> */
> void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
> {
>

2021-03-23 13:37:45

by Robin Murphy

[permalink] [raw]
Subject: Re: [PATCH 1/3] iova: Add CPU hotplug handler to flush rcaches

On 2021-03-01 12:12, John Garry wrote:
> Like the intel IOMMU driver already does, flush the per-IOVA domain
> CPU rcache when a CPU goes offline - there's no point in keeping it.

Thanks John!

Reviewed-by: Robin Murphy <[email protected]>

> Signed-off-by: John Garry <[email protected]>
> ---
> drivers/iommu/iova.c | 30 +++++++++++++++++++++++++++++-
> include/linux/cpuhotplug.h | 1 +
> include/linux/iova.h | 1 +
> 3 files changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index e6e2fa85271c..c78312560425 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad);
> static void free_iova_rcaches(struct iova_domain *iovad);
> static void fq_destroy_all_entries(struct iova_domain *iovad);
> static void fq_flush_timeout(struct timer_list *t);
> +
> +static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
> +{
> + struct iova_domain *iovad;
> +
> + iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
> +
> + free_cpu_cached_iovas(cpu, iovad);
> + return 0;
> +}
> +
> static void free_global_cached_iovas(struct iova_domain *iovad);
>
> void
> @@ -51,6 +62,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
> iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
> rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
> rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
> + cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
> init_iova_rcaches(iovad);
> }
> EXPORT_SYMBOL_GPL(init_iova_domain);
> @@ -257,10 +269,21 @@ int iova_cache_get(void)
> {
> mutex_lock(&iova_cache_mutex);
> if (!iova_cache_users) {
> + int ret;
> +
> + ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
> + iova_cpuhp_dead);
> + if (ret) {
> + mutex_unlock(&iova_cache_mutex);
> + pr_err("Couldn't register cpuhp handler\n");
> + return ret;
> + }
> +
> iova_cache = kmem_cache_create(
> "iommu_iova", sizeof(struct iova), 0,
> SLAB_HWCACHE_ALIGN, NULL);
> if (!iova_cache) {
> + cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
> mutex_unlock(&iova_cache_mutex);
> pr_err("Couldn't create iova cache\n");
> return -ENOMEM;
> @@ -282,8 +305,10 @@ void iova_cache_put(void)
> return;
> }
> iova_cache_users--;
> - if (!iova_cache_users)
> + if (!iova_cache_users) {
> + cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
> kmem_cache_destroy(iova_cache);
> + }
> mutex_unlock(&iova_cache_mutex);
> }
> EXPORT_SYMBOL_GPL(iova_cache_put);
> @@ -606,6 +631,9 @@ void put_iova_domain(struct iova_domain *iovad)
> {
> struct iova *iova, *tmp;
>
> + cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
> + &iovad->cpuhp_dead);
> +
> free_iova_flush_queue(iovad);
> free_iova_rcaches(iovad);
> rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
> diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
> index f14adb882338..cedac9986557 100644
> --- a/include/linux/cpuhotplug.h
> +++ b/include/linux/cpuhotplug.h
> @@ -58,6 +58,7 @@ enum cpuhp_state {
> CPUHP_NET_DEV_DEAD,
> CPUHP_PCI_XGENE_DEAD,
> CPUHP_IOMMU_INTEL_DEAD,
> + CPUHP_IOMMU_IOVA_DEAD,
> CPUHP_LUSTRE_CFS_DEAD,
> CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
> CPUHP_PADATA_DEAD,
> diff --git a/include/linux/iova.h b/include/linux/iova.h
> index c834c01c0a5b..4be6c0ab4997 100644
> --- a/include/linux/iova.h
> +++ b/include/linux/iova.h
> @@ -95,6 +95,7 @@ struct iova_domain {
> flush-queues */
> atomic_t fq_timer_on; /* 1 when timer is active, 0
> when not */
> + struct hlist_node cpuhp_dead;
> };
>
> static inline unsigned long iova_size(struct iova *iova)
>

2021-03-24 01:55:31

by John Garry

[permalink] [raw]
Subject: Re: [PATCH 3/3] iova: Correct comment for free_cpu_cached_iovas()

On 23/03/2021 13:05, Robin Murphy wrote:
> On 2021-03-01 12:12, John Garry wrote:
>> Function free_cpu_cached_iovas() is not only called when a CPU is
>> hotplugged, so remove that part of the code comment.
>
> FWIW I read it as clarifying why this is broken out into a separate
> function vs. a monolithic "free all cached IOVAs" routine that handles
> both the per-cpu and global caches

> it never said "*only* used..."

It seems to be implying that.

It's only a code comment, so I don't care too much either way and can
drop this change.

>
> As such I'd hesitate to call it incorrect, but it's certainly arguable
> whether it needs to be stated or not, especially once the hotplug
> callsite is now obvious in the same file - on which note the function
> itself also shouldn't need to be public any more, no?
>

Right, I actually missed deleting iommu_dma_free_cpu_cached_iovas(), so
can fix that now.

Cheers,
John

> Robin.
>
>> Signed-off-by: John Garry <[email protected]>
>> ---
>>   drivers/iommu/iova.c | 2 +-
>>   1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
>> index c78312560425..465b3b0eeeb0 100644
>> --- a/drivers/iommu/iova.c
>> +++ b/drivers/iommu/iova.c
>> @@ -996,7 +996,7 @@ static void free_iova_rcaches(struct iova_domain
>> *iovad)
>>   }
>>   /*
>> - * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
>> + * free all the IOVA ranges cached by a cpu
>>    */
>>   void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
>>   {
>>
> .

2021-04-07 20:30:21

by Joerg Roedel

[permalink] [raw]
Subject: Re: [PATCH 0/3] iommu/iova: Add CPU hotplug handler to flush rcaches to core code

On Mon, Mar 01, 2021 at 08:12:18PM +0800, John Garry wrote:
> The Intel IOMMU driver supports flushing the per-CPU rcaches when a CPU is
> offlined.
>
> Let's move it to core code, so everyone can take advantage.
>
> Also correct a code comment.
>
> Based on v5.12-rc1. Tested on arm64 only.
>
> John Garry (3):
> iova: Add CPU hotplug handler to flush rcaches
> iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining
> iova: Correct comment for free_cpu_cached_iovas()
>
> drivers/iommu/intel/iommu.c | 31 -------------------------------
> drivers/iommu/iova.c | 32 ++++++++++++++++++++++++++++++--
> include/linux/cpuhotplug.h | 2 +-
> include/linux/iova.h | 1 +
> 4 files changed, 32 insertions(+), 34 deletions(-)

Applied, thanks.

2021-04-07 20:33:48

by John Garry

[permalink] [raw]
Subject: Re: [PATCH 0/3] iommu/iova: Add CPU hotplug handler to flush rcaches to core code

On 07/04/2021 09:04, Joerg Roedel wrote:
> On Mon, Mar 01, 2021 at 08:12:18PM +0800, John Garry wrote:
>> The Intel IOMMU driver supports flushing the per-CPU rcaches when a CPU is
>> offlined.
>>
>> Let's move it to core code, so everyone can take advantage.
>>
>> Also correct a code comment.
>>
>> Based on v5.12-rc1. Tested on arm64 only.
>>
>> John Garry (3):
>> iova: Add CPU hotplug handler to flush rcaches
>> iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining
>> iova: Correct comment for free_cpu_cached_iovas()
>>
>> drivers/iommu/intel/iommu.c | 31 -------------------------------
>> drivers/iommu/iova.c | 32 ++++++++++++++++++++++++++++++--
>> include/linux/cpuhotplug.h | 2 +-
>> include/linux/iova.h | 1 +
>> 4 files changed, 32 insertions(+), 34 deletions(-)
>
> Applied, thanks.
>
> .
>

Thanks, but there was a v2 on this series. Not sure which you applied.

https://lore.kernel.org/linux-iommu/[email protected]/T/#mbea81468782c75fa84744ad7a7801831a4c952e9