From: Vijayanand Jitta <[email protected]>
Whenever a new iova alloc request comes in, the iova is always searched
from the cached node and the nodes previous to the cached node. So even
if there is free iova space available in the nodes that come after the
cached node, iova allocation can still fail because of this approach.
Consider the following sequence of iova allocs and frees on
1GB of iova space:
1) alloc - 500MB
2) alloc - 12MB
3) alloc - 499MB
4) free - 12MB which was allocated in step 2
5) alloc - 13MB
After the above sequence we will have 12MB of free iova space, and the
cached node will be pointing to the iova pfn of the last alloc of 13MB,
which will be the lowest iova pfn of that iova space. Now if we get an
alloc request of 2MB, we just search from the cached node and then look
at lower iova pfns for free iova, and as there aren't any, the iova alloc
fails even though there is 12MB of free iova space.
To avoid such iova search failures, do a retry from the last rb tree node
when the iova search fails; this will search the entire tree and get an
iova if one is available.
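To make the failure and the retry concrete, here is a minimal standalone
model (a hypothetical userspace sketch that tracks the space with a
1MB-granularity bitmap instead of the rbtree; all names and the granularity
are invented for illustration, this is not the kernel code):

	#include <stdio.h>
	#include <stdbool.h>

	#define SPACE_MB 1024		/* model a 1GB iova space */

	static bool used[SPACE_MB];	/* 1MB granularity */
	static int cached = SPACE_MB;	/* models the cached node: search start */

	/* Search downward from 'start' for a free run of 'size' MB. */
	static int search_down(int start, int size)
	{
		for (int lo = start - size; lo >= 0; lo--) {
			bool fits = true;

			for (int i = lo; i < lo + size; i++) {
				if (used[i]) {
					fits = false;
					break;
				}
			}
			if (fits)
				return lo;
		}
		return -1;
	}

	static int alloc_mb(int size)
	{
		int lo = search_down(cached, size);

		if (lo < 0)	/* the patch's fix: retry once from the top */
			lo = search_down(SPACE_MB, size);
		if (lo < 0)
			return -1;
		for (int i = lo; i < lo + size; i++)
			used[i] = true;
		cached = lo;	/* cached node follows the latest allocation */
		return lo;
	}

	static void free_mb(int lo, int size)
	{
		for (int i = lo; i < lo + size; i++)
			used[i] = false;
	}

	int main(void)
	{
		int a = alloc_mb(500);	/* [524..1023] */
		int b = alloc_mb(12);	/* [512..523] */
		int c = alloc_mb(499);	/* [13..511]  */

		free_mb(b, 12);		/* leaves a 12MB hole at [512..523] */
		int d = alloc_mb(13);	/* hole too small, lands at [0..12] */

		printf("a=%d b=%d c=%d d=%d\n", a, b, c, d);
		/* Without the retry this search from cached == 0 would fail,
		 * even though the 12MB hole above would fit. */
		printf("2MB alloc lands at %dMB\n", alloc_mb(2));
		return 0;
	}

(The actual patch is tighter than this sketch: via low_pfn/high_pfn it only
rescans the previously unvisited range above the original cached node,
rather than the whole space.)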
Signed-off-by: Vijayanand Jitta <[email protected]>
---
drivers/iommu/iova.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 49fc01f..4e77116 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -184,8 +184,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
struct rb_node *curr, *prev;
struct iova *curr_iova;
unsigned long flags;
- unsigned long new_pfn;
+ unsigned long new_pfn, low_pfn_new;
unsigned long align_mask = ~0UL;
+ unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
if (size_aligned)
align_mask <<= fls_long(size - 1);
@@ -198,15 +199,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
+ low_pfn_new = curr_iova->pfn_hi + 1;
+
+retry:
do {
- limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
- new_pfn = (limit_pfn - size) & align_mask;
+ high_pfn = min(high_pfn, curr_iova->pfn_lo);
+ new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
curr_iova = rb_entry(curr, struct iova, node);
- } while (curr && new_pfn <= curr_iova->pfn_hi);
-
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+ if (high_pfn < size || new_pfn < low_pfn) {
+ if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+ high_pfn = limit_pfn;
+ low_pfn = low_pfn_new;
+ curr = &iovad->anchor.node;
+ curr_iova = rb_entry(curr, struct iova, node);
+ goto retry;
+ }
iovad->max32_alloc_size = size;
goto iova32_full;
}
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted by The Linux Foundation
1.9.1
From: Vijayanand Jitta <[email protected]>
Whenever an iova alloc request fails, we free the iova ranges present in
the percpu iova rcaches and then retry, but the global iova rcache is not
freed. As a result we could still see an iova alloc failure even after the
retry, since the global rcache is holding on to iovas, which can cause
fragmentation. So, free the global iova rcache as well and then go for the
retry.
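For context, the hunk below lands in the retry path of alloc_iova_fast().
Paraphrased from the iova.c of this era (abridged sketch, comments mine,
not a verbatim copy), the flow with the new call is:

	unsigned long
	alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn, bool flush_rcache)
	{
		unsigned long iova_pfn;
		struct iova *new_iova;

		/* Fast path: try the rcaches first. */
		iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
		if (iova_pfn)
			return iova_pfn;

	retry:
		new_iova = alloc_iova(iovad, size, limit_pfn, true);
		if (!new_iova) {
			unsigned int cpu;

			if (!flush_rcache)
				return 0;

			/* Flush the caches once and retry the rbtree search. */
			flush_rcache = false;
			for_each_online_cpu(cpu)
				free_cpu_cached_iovas(cpu, iovad);
			free_global_cached_iovas(iovad);	/* <-- this patch */
			goto retry;
		}

		return new_iova->pfn_lo;
	}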
Signed-off-by: Vijayanand Jitta <[email protected]>
---
drivers/iommu/iova.c | 23 +++++++++++++++++++++++
include/linux/iova.h | 6 ++++++
2 files changed, 29 insertions(+)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4e77116..5836c87 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -442,6 +442,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
+ free_global_cached_iovas(iovad);
goto retry;
}
@@ -1055,5 +1056,27 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
}
}
+/*
+ * free all the IOVA ranges of global cache
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_irqsave(&rcache->lock, flags);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ rcache->depot[j] = NULL;
+ }
+ rcache->depot_size = 0;
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
+
MODULE_AUTHOR("Anil S Keshavamurthy <[email protected]>");
MODULE_LICENSE("GPL");
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637ab..a905726 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -163,6 +163,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+void free_global_cached_iovas(struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -270,6 +271,11 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
struct iova_domain *iovad)
{
}
+
+static inline void free_global_cached_iovas(struct iova_domain *iovad)
+{
+}
+
#endif
#endif
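For reference, the two-level rcache that free_global_cached_iovas() drains
is structured roughly as follows (abbreviated sketch of this era's
definitions in iova.h and iova.c; field lists trimmed):

	#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* one rcache per size class */
	#define IOVA_MAG_SIZE 128		/* pfns per magazine */
	#define MAX_GLOBAL_MAGS 32		/* depot capacity */

	struct iova_magazine {
		unsigned long size;
		unsigned long pfns[IOVA_MAG_SIZE];
	};

	struct iova_rcache {
		spinlock_t lock;
		unsigned long depot_size;
		struct iova_magazine *depot[MAX_GLOBAL_MAGS];	/* global layer */
		struct iova_cpu_rcache __percpu *cpu_rcaches;	/* per-CPU layer */
	};

Per-CPU magazines sit in front of the global "depot" of full magazines;
the patch flushes the depot layer, which the existing percpu flush misses.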
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted by The Linux Foundation
1.9.1
On 8/20/2020 6:19 PM, [email protected] wrote:
> From: Vijayanand Jitta <[email protected]>
>
> [...]
ping ?
Thanks,
Vijay
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of Code Aurora Forum, hosted by The Linux Foundation
On 8/28/2020 1:01 PM, Vijayanand Jitta wrote:
>
>
> On 8/20/2020 6:19 PM, [email protected] wrote:
>> From: Vijayanand Jitta <[email protected]>
>>
>> [...]
>
> ping ?
>
> Thanks,
> Vijay
>
ping ?
Thanks,
Vijay
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of Code Aurora Forum, hosted by The Linux Foundation
On Mon, Sep 14, 2020 at 10:20:55AM +0530, Vijayanand Jitta wrote:
> ping ?
Robin needs to have a look first.
Regards,
Joerg
On 2020-08-20 13:49, [email protected] wrote:
> From: Vijayanand Jitta <[email protected]>
>
> [...]
> curr = __get_cached_rbnode(iovad, limit_pfn);
> curr_iova = rb_entry(curr, struct iova, node);
> + low_pfn_new = curr_iova->pfn_hi + 1;
Could we call "low_pfn_new" something like "retry_pfn" instead? This
code already has unavoidable readability struggles with so many
different "pfn"s in play, so having two different meanings of "new"
really doesn't help.
Other than that, I think this looks OK (IIRC it's basically what I
originally suggested), so with the naming tweaked,
Reviewed-by: Robin Murphy <[email protected]>
On 2020-08-20 13:49, [email protected] wrote:
> From: Vijayanand Jitta <[email protected]>
>
> [...]
> +/*
> + * free all the IOVA ranges of global cache
> + */
> +void free_global_cached_iovas(struct iova_domain *iovad)
As John pointed out last time, this should be static and the header
changes dropped.
(TBH we should probably register our own hotplug notifier instance for a
flush queue, so that external code has no need to poke at the per-CPU
caches either)
Robin.
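For what that notifier could look like, here is a hypothetical sketch using
the generic cpuhp multi-instance API (the state constant, the cpuhp_dead
list node in struct iova_domain, and the callback are all invented here for
illustration, not existing code):

	/* Hypothetical: let the iova code flush its own per-CPU caches on
	 * CPU hot-unplug, so external callers no longer need to call
	 * free_cpu_cached_iovas(). Assumes a new 'struct hlist_node
	 * cpuhp_dead' member in struct iova_domain and a new
	 * CPUHP_IOMMU_IOVA_DEAD state. */
	static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
	{
		struct iova_domain *iovad;

		iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
		free_cpu_cached_iovas(cpu, iovad);
		return 0;
	}

	/* Registered once, e.g. from iova_cache_get():
	 *	cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD,
	 *				"iommu/iova:dead", NULL,
	 *				iova_cpuhp_dead);
	 * and per domain, e.g. from init_iova_domain():
	 *	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
	 *					 &iovad->cpuhp_dead);
	 */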
On 9/18/2020 8:11 PM, Robin Murphy wrote:
> On 2020-08-20 13:49, [email protected] wrote:
>> From: Vijayanand Jitta <[email protected]>
>>
>> [...]
>> +/*
>> + * free all the IOVA ranges of global cache
>> + */
>> +void free_global_cached_iovas(struct iova_domain *iovad)
>
> As John pointed out last time, this should be static and the header
> changes dropped.
>
> (TBH we should probably register our own hotplug notifier instance for a
> flush queue, so that external code has no need to poke at the per-CPU
> caches either)
>
> Robin.
>
Right, I have made it static and dropped header changes in v3.
can you please review that.
Thanks,
Vijay
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of Code Aurora Forum, hosted by The Linux Foundation
On 9/18/2020 7:48 PM, Robin Murphy wrote:
> On 2020-08-20 13:49, [email protected] wrote:
>> From: Vijayanand Jitta <[email protected]>
>>
>> [...]
>> curr = __get_cached_rbnode(iovad, limit_pfn);
>> curr_iova = rb_entry(curr, struct iova, node);
>> + low_pfn_new = curr_iova->pfn_hi + 1;
>
> Could we call "low_pfn_new" something like "retry_pfn" instead? This
> code already has unavoidable readability struggles with so many
> different "pfn"s in play, so having two different meanings of "new"
> really doesn't help.
>
> Other than that, I think this looks OK (IIRC it's basically what I
> originally suggested), so with the naming tweaked,
>
> Reviewed-by: Robin Murphy <[email protected]>
>
Thanks for review, I have renamed it to retry_pfn in v4.
Thanks,
Vijay
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of Code Aurora Forum, hosted by The Linux Foundation
On 9/28/2020 6:11 PM, Vijayanand Jitta wrote:
>
>
> On 9/18/2020 8:11 PM, Robin Murphy wrote:
>> On 2020-08-20 13:49, [email protected] wrote:
>>> From: Vijayanand Jitta <[email protected]>
>>>
>>> [...]
>>> +/*
>>> + * free all the IOVA ranges of global cache
>>> + */
>>> +void free_global_cached_iovas(struct iova_domain *iovad)
>>
>> As John pointed out last time, this should be static and the header
>> changes dropped.
>>
>> (TBH we should probably register our own hotplug notifier instance for a
>> flush queue, so that external code has no need to poke at the per-CPU
>> caches either)
>>
>> Robin.
>>
>
> Right, I have made it static and dropped header changes in v3.
> can you please review that.
>
> Thanks,
> Vijay
Please review v4 instead of v3, I have updated other patch as well in v4.
Thanks,
Vijay
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of Code Aurora Forum, hosted by The Linux Foundation