Ensure find_vma() callers do so with the mmap_sem held.
I'm sure there are a few more places left to fix, but
this is a pretty good start. Following the call chain,
some users become all tangled up, but I believe these
fixes are correct. Furthermore, the bulk of the callers
of find_vma are in a lot of functions where it is well
known that the mmap_sem is taken way before, such as
get_unmapped_area() family.
Please note that none of the patches are tested.
Thanks!
blackfin/ptrace: call find_vma with the mmap_sem held
m68k: call find_vma with the mmap_sem held in sys_cacheflush()
mips: call find_vma with the mmap_sem held
arc: call find_vma with the mmap_sem held
drivers/misc/sgi-gru/grufault.c: call find_vma with the mmap_sem held
drm/exynos: call find_vma with the mmap_sem held
arch/arc/kernel/troubleshoot.c | 7 ++++---
arch/blackfin/kernel/ptrace.c | 8 ++++++--
arch/m68k/kernel/sys_m68k.c | 18 ++++++++++++------
arch/mips/kernel/traps.c | 2 ++
arch/mips/mm/c-octeon.c | 2 ++
drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6 ++++++
drivers/misc/sgi-gru/grufault.c | 13 +++++++++----
7 files changed, 41 insertions(+), 15 deletions(-)
--
1.8.1.4
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can be
modified or even removed before returning to the caller. Take the
lock (shared) in order to avoid races while iterating through the
vmacache and/or rbtree.
This patch is completely *untested*.
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Steven Miao <[email protected]>
Cc: [email protected]
---
arch/blackfin/kernel/ptrace.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index e1f88e0..8b8fe67 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -117,6 +117,7 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
int
is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
{
+ bool valid;
struct vm_area_struct *vma;
struct sram_list_struct *sraml;
@@ -124,9 +125,12 @@ is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long
if (start + len < start)
return -EIO;
+ down_read(&child->mm->mmap_sem);
vma = find_vma(child->mm, start);
- if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
- return 0;
+ valid = vma && start >= vma->vm_start && start + len <= vma->vm_end;
+ up_read(&child->mm->mmap_sem);
+ if (valid)
+ return 0;
for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
if (start >= (unsigned long)sraml->addr
--
1.8.1.4
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can be
modified or even removed before returning to the caller. Take the
lock (exclusively) in order to avoid races while iterating through
the vmacache and/or rbtree.
Updates two functions:
- process_fpemu_return()
- octeon_flush_cache_sigtramp()
This patch is completely *untested*.
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: [email protected]
---
arch/mips/kernel/traps.c | 2 ++
arch/mips/mm/c-octeon.c | 2 ++
2 files changed, 4 insertions(+)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 074e857..c51bd20 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -712,10 +712,12 @@ int process_fpemu_return(int sig, void __user *fault_addr)
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
+ down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
+ up_read(&current->mm->mmap_sem);
} else {
si.si_code = BUS_ADRERR;
}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index f41a5c5..05b1d7c 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -137,8 +137,10 @@ static void octeon_flush_cache_sigtramp(unsigned long addr)
{
struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
octeon_flush_icache_all_cores(vma);
+ up_read(&current->mm->mmap_sem);
}
--
1.8.1.4
From: Jonathan Gonzalez V <[email protected]>
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can be
modified or even removed before returning to the caller. Take the
lock (exclusively) in order to avoid races while iterating through
the vmacache and/or rbtree.
This patch is completely *untested*.
Signed-off-by: Jonathan Gonzalez V <[email protected]>
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Inki Dae <[email protected]>
Cc: Joonyoung Shim <[email protected]>
Cc: David Airlie <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6c1885e..8001587 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -467,14 +467,17 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, userptr);
if (!vma) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to get vm region.\n");
ret = -EFAULT;
goto err_free_pages;
}
if (vma->vm_end < userptr + size) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("vma is too small.\n");
ret = -EFAULT;
goto err_free_pages;
@@ -482,6 +485,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->vma = exynos_gem_get_vma(vma);
if (!g2d_userptr->vma) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to copy vma.\n");
ret = -ENOMEM;
goto err_free_pages;
@@ -492,10 +496,12 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
npages, pages, vma);
if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to get user pages from userptr.\n");
goto err_put_vma;
}
+ up_read(&current->mm->mmap_sem);
g2d_userptr->pages = pages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
--
1.8.1.4
From: Jonathan Gonzalez V <[email protected]>
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can
be modified or even removed before returning to the caller.
Take the lock in order to avoid races while iterating through
the vmacache and/or rbtree.
This patch is completely *untested*.
Signed-off-by: Jonathan Gonzalez V <[email protected]>
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Dimitri Sivanich <[email protected]>
---
drivers/misc/sgi-gru/grufault.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index f74fc0c..15adc84 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -266,6 +266,7 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
unsigned long paddr;
int ret, ps;
+ down_write(&mm->mmap_sem);
vma = find_vma(mm, vaddr);
if (!vma)
goto inval;
@@ -277,22 +278,26 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
rmb(); /* Must/check ms_range_active before loading PTEs */
ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
if (ret) {
- if (atomic)
- goto upm;
+ if (atomic) {
+ up_write(&mm->mmap_sem);
+ return VTOP_RETRY;
+ }
if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
goto inval;
}
if (is_gru_paddr(paddr))
goto inval;
+
+ up_write(&mm->mmap_sem);
+
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
return VTOP_SUCCESS;
inval:
+ up_write(&mm->mmap_sem);
return VTOP_INVALID;
-upm:
- return VTOP_RETRY;
}
--
1.8.1.4
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can be
modified or even removed before returning to the caller. Take the
lock (shared) in order to avoid races while iterating through
the vmacache and/or rbtree.
This patch is completely *untested*.
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: [email protected]
---
arch/m68k/kernel/sys_m68k.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 3a480b3..d2263a0 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
- struct vm_area_struct *vma;
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
@@ -389,16 +388,23 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
+ struct vm_area_struct *vma;
+ bool invalid;
+
+ /* Check for overflow. */
+ if (addr + len < addr)
+ goto out;
+
/*
* Verify that the specified address region actually belongs
* to this process.
*/
- vma = find_vma (current->mm, addr);
ret = -EINVAL;
- /* Check for overflow. */
- if (addr + len < addr)
- goto out;
- if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, addr);
+ invalid = !vma || addr < vma->vm_start || addr + len > vma->vm_end;
+ up_read(&current->mm->mmap_sem);
+ if (invalid)
goto out;
}
--
1.8.1.4
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can be
modified or even removed before returning to the caller. Take the
lock (shared) in order to avoid races while iterating through
the vmacache and/or rbtree.
This patch is completely *untested*.
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Vineet Gupta <[email protected]>
---
arch/arc/kernel/troubleshoot.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 73a7450..3a5a5c1 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -90,7 +90,7 @@ static void show_faulting_vma(unsigned long address, char *buf)
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
-
+ down_read(&current->active_mm->mmap_sem);
vma = find_vma(current->active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
@@ -110,9 +110,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
vma->vm_start < TASK_UNMAPPED_BASE ?
address : address - vma->vm_start,
nm, vma->vm_start, vma->vm_end);
- } else {
+ } else
pr_info(" @No matching VMA found\n");
- }
+
+ up_read(&current->active_mm->mmap_sem);
}
static void show_ecr_verbose(struct pt_regs *regs)
--
1.8.1.4
Hi David,
On Sun, Apr 20, 2014 at 4:26 AM, Davidlohr Bueso <[email protected]> wrote:
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can be
> modified or even removed before returning to the caller. Take the
> lock (shared) in order to avoid races while iterating through
> the vmacache and/or rbtree.
Thanks for your patch!
> This patch is completely *untested*.
>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Geert Uytterhoeven <[email protected]>
> Cc: [email protected]
> ---
> arch/m68k/kernel/sys_m68k.c | 18 ++++++++++++------
> 1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
> index 3a480b3..d2263a0 100644
> --- a/arch/m68k/kernel/sys_m68k.c
> +++ b/arch/m68k/kernel/sys_m68k.c
> @@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
> asmlinkage int
> sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
> {
> - struct vm_area_struct *vma;
> int ret = -EINVAL;
>
> if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
> @@ -389,16 +388,23 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
> if (!capable(CAP_SYS_ADMIN))
> goto out;
> } else {
> + struct vm_area_struct *vma;
> + bool invalid;
> +
> + /* Check for overflow. */
> + if (addr + len < addr)
> + goto out;
> +
> /*
> * Verify that the specified address region actually belongs
> * to this process.
> */
> - vma = find_vma (current->mm, addr);
> ret = -EINVAL;
> - /* Check for overflow. */
> - if (addr + len < addr)
> - goto out;
> - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
> + down_read(&current->mm->mmap_sem);
> + vma = find_vma(current->mm, addr);
> + invalid = !vma || addr < vma->vm_start || addr + len > vma->vm_end;
> + up_read(&current->mm->mmap_sem);
> + if (invalid)
> goto out;
> }
Shouldn't the up_read() be moved to the end of the function?
The vma may still be modified or destroyed between the call to find_vma(),
and the actual cache flush?
Am I missing something?
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
On Sun, 2014-04-20 at 10:04 +0200, Geert Uytterhoeven wrote:
> Hi David,
>
> On Sun, Apr 20, 2014 at 4:26 AM, Davidlohr Bueso <[email protected]> wrote:
> > Performing vma lookups without taking the mm->mmap_sem is asking
> > for trouble. While doing the search, the vma in question can be
> > modified or even removed before returning to the caller. Take the
> > lock (shared) in order to avoid races while iterating through
> > the vmacache and/or rbtree.
>
> Thanks for your patch!
>
> > This patch is completely *untested*.
> >
> > Signed-off-by: Davidlohr Bueso <[email protected]>
> > Cc: Geert Uytterhoeven <[email protected]>
> > Cc: [email protected]
> > ---
> > arch/m68k/kernel/sys_m68k.c | 18 ++++++++++++------
> > 1 file changed, 12 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
> > index 3a480b3..d2263a0 100644
> > --- a/arch/m68k/kernel/sys_m68k.c
> > +++ b/arch/m68k/kernel/sys_m68k.c
> > @@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
> > asmlinkage int
> > sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
> > {
> > - struct vm_area_struct *vma;
> > int ret = -EINVAL;
> >
> > if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
> > @@ -389,16 +388,23 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
> > if (!capable(CAP_SYS_ADMIN))
> > goto out;
> > } else {
> > + struct vm_area_struct *vma;
> > + bool invalid;
> > +
> > + /* Check for overflow. */
> > + if (addr + len < addr)
> > + goto out;
> > +
> > /*
> > * Verify that the specified address region actually belongs
> > * to this process.
> > */
> > - vma = find_vma (current->mm, addr);
> > ret = -EINVAL;
> > - /* Check for overflow. */
> > - if (addr + len < addr)
> > - goto out;
> > - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
> > + down_read(&current->mm->mmap_sem);
> > + vma = find_vma(current->mm, addr);
> > + invalid = !vma || addr < vma->vm_start || addr + len > vma->vm_end;
> > + up_read(&current->mm->mmap_sem);
> > + if (invalid)
> > goto out;
> > }
>
> Shouldn't the up_read() be moved to the end of the function?
> The vma may still be modified or destroyed between the call to find_vma(),
> and the actual cache flush?
I don't think so. afaict the vma is only searched to check upon validity
for the address being passed. Once the sem is dropped, the call doesn't
do absolutely anything else with the returned vma.
Thanks,
Davidlohr
Hi David,
On Mon, Apr 21, 2014 at 12:28 AM, Davidlohr Bueso <[email protected]> wrote:
> On Sun, 2014-04-20 at 10:04 +0200, Geert Uytterhoeven wrote:
>> On Sun, Apr 20, 2014 at 4:26 AM, Davidlohr Bueso <[email protected]> wrote:
>> > Performing vma lookups without taking the mm->mmap_sem is asking
>> > for trouble. While doing the search, the vma in question can be
>> > modified or even removed before returning to the caller. Take the
>> > lock (shared) in order to avoid races while iterating through
>> > the vmacache and/or rbtree.
>>
>> Thanks for your patch!
>>
>> > This patch is completely *untested*.
>> >
>> > Signed-off-by: Davidlohr Bueso <[email protected]>
>> > Cc: Geert Uytterhoeven <[email protected]>
>> > Cc: [email protected]
>> > ---
>> > arch/m68k/kernel/sys_m68k.c | 18 ++++++++++++------
>> > 1 file changed, 12 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
>> > index 3a480b3..d2263a0 100644
>> > --- a/arch/m68k/kernel/sys_m68k.c
>> > +++ b/arch/m68k/kernel/sys_m68k.c
>> > @@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
>> > asmlinkage int
>> > sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
>> > {
>> > - struct vm_area_struct *vma;
>> > int ret = -EINVAL;
>> >
>> > if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
>> > @@ -389,16 +388,23 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
>> > if (!capable(CAP_SYS_ADMIN))
>> > goto out;
>> > } else {
>> > + struct vm_area_struct *vma;
>> > + bool invalid;
>> > +
>> > + /* Check for overflow. */
>> > + if (addr + len < addr)
>> > + goto out;
>> > +
>> > /*
>> > * Verify that the specified address region actually belongs
>> > * to this process.
>> > */
>> > - vma = find_vma (current->mm, addr);
>> > ret = -EINVAL;
>> > - /* Check for overflow. */
>> > - if (addr + len < addr)
>> > - goto out;
>> > - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
>> > + down_read(&current->mm->mmap_sem);
>> > + vma = find_vma(current->mm, addr);
>> > + invalid = !vma || addr < vma->vm_start || addr + len > vma->vm_end;
>> > + up_read(&current->mm->mmap_sem);
>> > + if (invalid)
>> > goto out;
>> > }
>>
>> Shouldn't the up_read() be moved to the end of the function?
>> The vma may still be modified or destroyed between the call to find_vma(),
>> and the actual cache flush?
>
> I don't think so. afaict the vma is only searched to check upon validity
> for the address being passed. Once the sem is dropped, the call doesn't
> do absolutely anything else with the returned vma.
The function indeed doesn't do anything anymore with the vma itself, but
it does do something with the addr/len pair, which may no longer match
with the vma if it changes after the up_read(). I.e. the address may no longer
be valid when the cache is actually flushed.
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
On Sat, Apr 19, 2014 at 07:26:30PM -0700, Davidlohr Bueso wrote:
> From: Jonathan Gonzalez V <[email protected]>
>
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can
> be modified or even removed before returning to the caller.
> Take the lock in order to avoid races while iterating through
> the vmacache and/or rbtree.
>
> This patch is completely *untested*.
The mmap_sem is already taken in all paths calling gru_vtop().
The gru_intr() function takes it before calling gru_try_dropin(), from which
all calls to gru_vtop() originate.
The gru_find_lock_gts() function takes it when called from
gru_handle_user_call_os(), which then calls gru_user_dropin()->gru_try_dropin().
Nacked-by: Dimitri Sivanich <[email protected]>
>
> Signed-off-by: Jonathan Gonzalez V <[email protected]>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Dimitri Sivanich <[email protected]>
> ---
> drivers/misc/sgi-gru/grufault.c | 13 +++++++++----
> 1 file changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
> index f74fc0c..15adc84 100644
> --- a/drivers/misc/sgi-gru/grufault.c
> +++ b/drivers/misc/sgi-gru/grufault.c
> @@ -266,6 +266,7 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> unsigned long paddr;
> int ret, ps;
>
> + down_write(&mm->mmap_sem);
> vma = find_vma(mm, vaddr);
> if (!vma)
> goto inval;
> @@ -277,22 +278,26 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> rmb(); /* Must/check ms_range_active before loading PTEs */
> ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> if (ret) {
> - if (atomic)
> - goto upm;
> + if (atomic) {
> + up_write(&mm->mmap_sem);
> + return VTOP_RETRY;
> + }
> if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> goto inval;
> }
> if (is_gru_paddr(paddr))
> goto inval;
> +
> + up_write(&mm->mmap_sem);
> +
> paddr = paddr & ~((1UL << ps) - 1);
> *gpa = uv_soc_phys_ram_to_gpa(paddr);
> *pageshift = ps;
> return VTOP_SUCCESS;
>
> inval:
> + up_write(&mm->mmap_sem);
> return VTOP_INVALID;
> -upm:
> - return VTOP_RETRY;
> }
>
>
> --
> 1.8.1.4
Hi Davidlohr,
On Sun, Apr 20, 2014 at 10:26 AM, Davidlohr Bueso <[email protected]> wrote:
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can be
> modified or even removed before returning to the caller. Take the
> lock (shared) in order to avoid races while iterating through the
> vmacache and/or rbtree.
Yes, mm->mmap_sem should lock here. Applied, thanks.
>
> This patch is completely *untested*.
>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Steven Miao <[email protected]>
> Cc: [email protected]
> ---
> arch/blackfin/kernel/ptrace.c | 8 ++++++--
> 1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
> index e1f88e0..8b8fe67 100644
> --- a/arch/blackfin/kernel/ptrace.c
> +++ b/arch/blackfin/kernel/ptrace.c
> @@ -117,6 +117,7 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
> int
> is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
> {
> + bool valid;
> struct vm_area_struct *vma;
> struct sram_list_struct *sraml;
>
> @@ -124,9 +125,12 @@ is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long
> if (start + len < start)
> return -EIO;
>
> + down_read(&child->mm->mmap_sem);
> vma = find_vma(child->mm, start);
> - if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
> - return 0;
> + valid = vma && start >= vma->vm_start && start + len <= vma->vm_end;
> + up_read(&child->mm->mmap_sem);
> + if (valid)
> + return 0;
>
> for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
> if (start >= (unsigned long)sraml->addr
> --
> 1.8.1.4
>
-steven
Hi,
On Sunday 20 April 2014 07:56 AM, Davidlohr Bueso wrote:
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can be
> modified or even removed before returning to the caller. Take the
> lock (shared) in order to avoid races while iterating through
> the vmacache and/or rbtree.
>
> This patch is completely *untested*.
>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Vineet Gupta <[email protected]>
> ---
> arch/arc/kernel/troubleshoot.c | 7 ++++---
> 1 file changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
> index 73a7450..3a5a5c1 100644
> --- a/arch/arc/kernel/troubleshoot.c
> +++ b/arch/arc/kernel/troubleshoot.c
> @@ -90,7 +90,7 @@ static void show_faulting_vma(unsigned long address, char *buf)
> /* can't use print_vma_addr() yet as it doesn't check for
> * non-inclusive vma
> */
> -
> + down_read(&current->active_mm->mmap_sem);
Actually avoiding the lock here was intentional - atleast in the past, in case of
a crash from mmap_region() - due to our custom mmap syscall handler (not in
mainline) it would cause a double lockup.
However given that this code is now only called for user contexts [if
user_mode(regs)] above becomes moot point anyways and it would be safe to do that.
So, yes this looks good.
A minor suggestion though - can you please use a tmp for current->active_mm as
there are 3 users now in the function.
Acked-by: Vineet Gupta <[email protected]>
Thx
-Vineet
> vma = find_vma(current->active_mm, address);
>
> /* check against the find_vma( ) behaviour which returns the next VMA
> @@ -110,9 +110,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
> vma->vm_start < TASK_UNMAPPED_BASE ?
> address : address - vma->vm_start,
> nm, vma->vm_start, vma->vm_end);
> - } else {
> + } else
> pr_info(" @No matching VMA found\n");
> - }
> +
> + up_read(&current->active_mm->mmap_sem);
> }
>
> static void show_ecr_verbose(struct pt_regs *regs)
>
On Sat, Apr 19, 2014 at 07:26:28PM -0700, Davidlohr Bueso wrote:
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can be
> modified or even removed before returning to the caller. Take the
> lock (exclusively) in order to avoid races while iterating through
> the vmacache and/or rbtree.
>
> Updates two functions:
> - process_fpemu_return()
> - octeon_flush_cache_sigtramp()
>
> This patch is completely *untested*.
>
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Ralf Baechle <[email protected]>
> Cc: [email protected]
Tested-by: Andreas Herrmann <[email protected]>
Thanks,
Andreas
> ---
> arch/mips/kernel/traps.c | 2 ++
> arch/mips/mm/c-octeon.c | 2 ++
> 2 files changed, 4 insertions(+)
>
> diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
> index 074e857..c51bd20 100644
> --- a/arch/mips/kernel/traps.c
> +++ b/arch/mips/kernel/traps.c
> @@ -712,10 +712,12 @@ int process_fpemu_return(int sig, void __user *fault_addr)
> si.si_addr = fault_addr;
> si.si_signo = sig;
> if (sig == SIGSEGV) {
> + down_read(&current->mm->mmap_sem);
> if (find_vma(current->mm, (unsigned long)fault_addr))
> si.si_code = SEGV_ACCERR;
> else
> si.si_code = SEGV_MAPERR;
> + up_read(&current->mm->mmap_sem);
> } else {
> si.si_code = BUS_ADRERR;
> }
> diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
> index f41a5c5..05b1d7c 100644
> --- a/arch/mips/mm/c-octeon.c
> +++ b/arch/mips/mm/c-octeon.c
> @@ -137,8 +137,10 @@ static void octeon_flush_cache_sigtramp(unsigned long addr)
> {
> struct vm_area_struct *vma;
>
> + down_read(&current->mm->mmap_sem);
> vma = find_vma(current->mm, addr);
> octeon_flush_icache_all_cores(vma);
> + up_read(&current->mm->mmap_sem);
> }
>
>
> --
> 1.8.1.4
>
>
Performing vma lookups without taking the mm->mmap_sem is asking
for trouble. While doing the search, the vma in question can
be modified or even removed before returning to the caller.
Take the lock in order to avoid races while iterating through
the vmacache and/or rbtree.
Also do some very minor cleanup changes.
This patch is only compile tested.
Signed-off-by: Davidlohr Bueso <[email protected]>
Cc: Pawel Osciak <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Kyungmin Park <[email protected]>
Cc: Mauro Carvalho Chehab <[email protected]>
Cc: [email protected]
---
It would seem this is the last offending user.
v4l2 is a maze but I believe that this is needed as I don't
see the mmap_sem being taken by any callers of vb2_dma_sg_get_userptr().
drivers/media/v4l2-core/videobuf2-dma-sg.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index c779f21..2a21100 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -168,8 +168,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long first, last;
int num_pages_from_user;
struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
@@ -178,7 +179,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
- first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
+ first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
buf->num_pages = last - first + 1;
@@ -187,7 +188,8 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->pages)
goto userptr_fail_alloc_pages;
- vma = find_vma(current->mm, vaddr);
+ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, vaddr);
if (!vma) {
dprintk(1, "no vma for address %lu\n", vaddr);
goto userptr_fail_find_vma;
@@ -218,7 +220,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->pages[num_pages_from_user] = pfn_to_page(pfn);
}
} else
- num_pages_from_user = get_user_pages(current, current->mm,
+ num_pages_from_user = get_user_pages(current, mm,
vaddr & PAGE_MASK,
buf->num_pages,
write,
@@ -233,6 +235,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->num_pages, buf->offset, size, 0))
goto userptr_fail_alloc_table_from_pages;
+ up_write(&mm->mmap_sem);
return buf;
userptr_fail_alloc_table_from_pages:
@@ -244,6 +247,7 @@ userptr_fail_get_user_pages:
put_page(buf->pages[num_pages_from_user]);
vb2_put_vma(buf->vma);
userptr_fail_find_vma:
+ up_write(&mm->mmap_sem);
kfree(buf->pages);
userptr_fail_alloc_pages:
kfree(buf);
--
1.8.1.4
Hello,
On 2014-04-28 20:09, Davidlohr Bueso wrote:
> Performing vma lookups without taking the mm->mmap_sem is asking
> for trouble. While doing the search, the vma in question can
> be modified or even removed before returning to the caller.
> Take the lock in order to avoid races while iterating through
> the vmacache and/or rbtree.
>
> Also do some very minor cleanup changes.
>
> This patch is only compile tested.
NACK.
mm->mmap_sem is taken by videobuf2-core to avoid AB-BA deadlock with v4l2 core lock. For more information, please check videobuf2-core.c. However you are right that this is a bit confusing and we need more comments about the place where mmap_sem is taken. Here is some background for this decision:
https://www.mail-archive.com/[email protected]/msg38599.html
http://www.spinics.net/lists/linux-media/msg40225.html
> Signed-off-by: Davidlohr Bueso <[email protected]>
> Cc: Pawel Osciak <[email protected]>
> Cc: Marek Szyprowski <[email protected]>
> Cc: Kyungmin Park <[email protected]>
> Cc: Mauro Carvalho Chehab <[email protected]>
> Cc: [email protected]
> ---
> It would seem this is the last offending user.
> v4l2 is a maze but I believe that this is needed as I don't
> see the mmap_sem being taken by any callers of vb2_dma_sg_get_userptr().
>
> drivers/media/v4l2-core/videobuf2-dma-sg.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
> index c779f21..2a21100 100644
> --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
> +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
> @@ -168,8 +168,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> unsigned long first, last;
> int num_pages_from_user;
> struct vm_area_struct *vma;
> + struct mm_struct *mm = current->mm;
>
> - buf = kzalloc(sizeof *buf, GFP_KERNEL);
> + buf = kzalloc(sizeof(*buf), GFP_KERNEL);
> if (!buf)
> return NULL;
>
> @@ -178,7 +179,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> buf->offset = vaddr & ~PAGE_MASK;
> buf->size = size;
>
> - first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
> + first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
> last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
> buf->num_pages = last - first + 1;
>
> @@ -187,7 +188,8 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> if (!buf->pages)
> goto userptr_fail_alloc_pages;
>
> - vma = find_vma(current->mm, vaddr);
> + down_write(&mm->mmap_sem);
> + vma = find_vma(mm, vaddr);
> if (!vma) {
> dprintk(1, "no vma for address %lu\n", vaddr);
> goto userptr_fail_find_vma;
> @@ -218,7 +220,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> buf->pages[num_pages_from_user] = pfn_to_page(pfn);
> }
> } else
> - num_pages_from_user = get_user_pages(current, current->mm,
> + num_pages_from_user = get_user_pages(current, mm,
> vaddr & PAGE_MASK,
> buf->num_pages,
> write,
> @@ -233,6 +235,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
> buf->num_pages, buf->offset, size, 0))
> goto userptr_fail_alloc_table_from_pages;
>
> + up_write(&mm->mmap_sem);
> return buf;
>
> userptr_fail_alloc_table_from_pages:
> @@ -244,6 +247,7 @@ userptr_fail_get_user_pages:
> put_page(buf->pages[num_pages_from_user]);
> vb2_put_vma(buf->vma);
> userptr_fail_find_vma:
> + up_write(&mm->mmap_sem);
> kfree(buf->pages);
> userptr_fail_alloc_pages:
> kfree(buf);
Best regards
--
Marek Szyprowski, PhD
Samsung R&D Institute Poland