Remove unnecessary calls to kmap{,_atomic}() when acquiring
pages using GFP_DMA32.

GFP_DMA32 satisfies allocation requests from the DMA32 zone.
Therefore, pages allocated with GFP_DMA32 cannot come from
highmem.

Avoid calls to kmap_local_page() / kunmap_local() and
kmap() / kunmap() in psb_mmu_alloc_pd(). Instead, use
page_address().

Remove the usage of kmap_atomic() / kunmap_atomic() in
psb_mmu_alloc_pt(). Use page_address() instead.

Substitute the kmap_atomic(pt->p) / kunmap_atomic(pt->v) calls
in psb_mmu_pt_alloc_map_lock() and psb_mmu_pt_unmap_unlock()
with page_address(pt->p). This is possible because
pt = psb_mmu_alloc_pt(pd) allocates its page with
pt->p = alloc_page(GFP_DMA32).
Suggested-by: Ira Weiny <[email protected]>
Signed-off-by: Sumitra Sharma <[email protected]>
---
drivers/gpu/drm/gma500/mmu.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index a70b01ccdf70..59aa5661e56a 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -184,20 +184,15 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
 		pd->invalid_pte = 0;
 	}
 
-	v = kmap_local_page(pd->dummy_pt);
+	v = page_address(pd->dummy_pt);
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
 		v[i] = pd->invalid_pte;
 
-	kunmap_local(v);
-
-	v = kmap_local_page(pd->p);
+	v = page_address(pd->p);
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
 		v[i] = pd->invalid_pde;
 
-	kunmap_local(v);
-
-	clear_page(kmap(pd->dummy_page));
-	kunmap(pd->dummy_page);
+	clear_page(page_address(pd->dummy_page));
 
 	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
 	if (!pd->tables)
@@ -279,7 +274,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
 	spin_lock(lock);
 
-	v = kmap_atomic(pt->p);
+	v = page_address(pt->p);
 	clf = (uint8_t *) v;
 	ptes = (uint32_t *) v;
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -293,7 +288,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		}
 		mb();
 	}
-	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	pt->count = 0;
@@ -339,7 +333,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
 	}
-	pt->v = kmap_atomic(pt->p);
+	pt->v = page_address(pt->p);
 	return pt;
 }
@@ -365,7 +359,6 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 	struct psb_mmu_pd *pd = pt->pd;
 	uint32_t *v;
 
-	kunmap_atomic(pt->v);
 	if (pt->count == 0) {
 		v = kmap_atomic(pd->p);
 		v[pt->index] = pd->invalid_pde;
--
2.25.1
On Tuesday 20 June 2023 20:01:48 CEST Sumitra Sharma wrote:
> Remove unnecessary calls to kmap{,_atomic}() when acquiring
> pages using GFP_DMA32.
>
> GFP_DMA32 satisfies allocation requests from the DMA32 zone.
> Therefore, pages allocated with GFP_DMA32 cannot come from
> highmem.
>
> Avoid calls to kmap_local_page() / kunmap_local() and
> kmap() / kunmap() in psb_mmu_alloc_pd(). Instead, use
> page_address().
>
> Remove the usage of kmap_atomic() / kunmap_atomic() in
> psb_mmu_alloc_pt(). Use page_address() instead.
>
> Substitute the kmap_atomic(pt->p) / kunmap_atomic(pt->v) calls
> in psb_mmu_pt_alloc_map_lock() and psb_mmu_pt_unmap_unlock()
> with page_address(pt->p). This is possible because
> pt = psb_mmu_alloc_pt(pd) allocates its page with
> pt->p = alloc_page(GFP_DMA32).
Sumitra,
I'm sorry, but this patch cannot be acked with this commit message.

The commit message is missing two _really_ important pieces of information;
therefore, it is not acked. Please check what I wrote below and either add the
missing information or change the code accordingly...
You should assure everybody that the code between the old kmap_atomic() /
kunmap_atomic() calls doesn't depend on the implicit pagefault_disable() or
preempt_disable() calls, or on both.
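To make that concern concrete, here is a minimal, hypothetical sketch (the
names are invented for the example; this is not code from gma500) of a
section that would silently break if the implicit preempt_disable() were
dropped:

#include <linux/highmem.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, scratch_count);	/* hypothetical per-CPU counter */

/*
 * Hidden dependency: this function never calls preempt_disable() itself,
 * yet __this_cpu_inc() below is only safe because kmap_atomic() disables
 * preemption as a side effect. A blind switch to page_address() or
 * kmap_local_page() would silently break it.
 */
static void count_page_touch(struct page *page)
{
	u32 *v = kmap_atomic(page);

	v[0] = 0;			/* touch the mapped page */
	__this_cpu_inc(scratch_count);	/* relies on preemption being off */

	kunmap_atomic(v);
}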
Please read again the section of the highmem documentation regarding
kmap_atomic() at https://docs.kernel.org/mm/highmem.html
In particular, take care to read and understand the passage: "[...] the code
between calls to kmap_atomic() and kunmap_atomic() may implicitly depend on
the side effects of atomic mappings, i.e. disabling page faults or preemption,
or both. In that case, explicit calls to pagefault_disable() or
preempt_disable() or both must be made in conjunction with the use of
kmap_local_page().".
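For such a section, that prescription translates into a sketch like the
following (again hypothetical, reusing the scratch_count counter from the
sketch above): the side effect becomes an explicit call.

#include <linux/highmem.h>
#include <linux/preempt.h>

static void count_page_touch_fixed(struct page *page)
{
	u32 *v;

	preempt_disable();		/* was implicit in kmap_atomic() */
	v = kmap_local_page(page);

	v[0] = 0;
	__this_cpu_inc(scratch_count);	/* still safe: preemption is off */

	kunmap_local(v);
	preempt_enable();
}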
Please also study carefully the following patch from Zhao, suggested by Ira
and reviewed by Ira and me: "[PATCH v2 3/9] drm/i915: Use kmap_local_page() in
gem/i915_gem_shmem.c". It hasn't yet reached upstream, so you need to find it
on lore.kernel.org at https://lore.kernel.org/lkml/[email protected]/
Please note that, in turn, that patch also contains a link to a patch from
Ira, who too had to disable page faults:
https://lore.kernel.org/all/[email protected]
I haven't yet looked at your code. If any sections do depend on those implicit
disables, you should act accordingly and add one or both of the above-mentioned
calls, even in cases where you get rid of local mappings.
Instead, if the sections don't depend on the mentioned side effects, you should
write something like what I wrote in "[PATCH] NFS: Convert kmap_atomic() to
kmap_local_folio()", which you can find at
https://lore.kernel.org/lkml/[email protected]/ or by
using "git show 4b71e2416ec4".
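And when, as in this patch, the pages are guaranteed to come from the DMA32
zone, the whole mapping dance disappears. A minimal sketch under that
assumption (invented names, not the driver's code):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * A page allocated with GFP_DMA32 can never come from highmem, so it
 * always has a permanent kernel virtual address: page_address() can be
 * used directly and no mapping/unmapping is needed at all.
 */
static int zero_dma32_page(struct page **out)
{
	struct page *page;
	u32 *v;
	int i;

	page = alloc_page(GFP_DMA32);
	if (!page)
		return -ENOMEM;

	v = page_address(page);	/* valid: the page is in lowmem */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		v[i] = 0;

	*out = page;
	return 0;
}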
Thanks for working on this,
Fabio
On Wed, Jun 21, 2023 at 12:31:40AM +0200, Fabio M. De Francesco wrote:
> On Tuesday 20 June 2023 20:01:48 CEST Sumitra Sharma wrote:
Hi Fabio,
Thank you for pointing out the mistakes I made in this patch. I am working on
the changes right away.

Regards,
Sumitra