From: Gavin Shan <[email protected]>
When registering an MMU notifier, a new mmu_notifier_mm instance is
always allocated up front and then freed again if the current
mm_struct's mmu_notifier_mm has already been initialized. That causes
unnecessary overhead. This patch eliminates it by allocating only when
no mmu_notifier_mm exists yet; since the allocation then happens with
mm_take_all_locks() held, GFP_ATOMIC is used instead of GFP_KERNEL.
Signed-off-by: Gavin Shan <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
mm/mmu_notifier.c | 22 +++++++++++-----------
1 files changed, 11 insertions(+), 11 deletions(-)
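
For review convenience, here is a condensed sketch of how
do_mmu_notifier_register() reads with this patch applied. The code
between the two hunks is elided, so this is illustrative rather than
the literal file contents:

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out;

	if (!mm_has_notifiers(mm)) {
		/*
		 * Allocate only on the first registration; GFP_ATOMIC
		 * because the allocation now happens with
		 * mm_take_all_locks() held.
		 */
		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
					  GFP_ATOMIC);
		if (unlikely(!mmu_notifier_mm)) {
			ret = -ENOMEM;
			goto out_of_mem;
		}
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
	}
	atomic_inc(&mm->mm_count);

	/* ... unchanged code between the two hunks elided ... */

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

out_of_mem:
	mm_drop_all_locks(mm);
out:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);

	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

Note that the allocation-failure path still goes through
mm_drop_all_locks() and up_write() before returning -ENOMEM.
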
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 862b608..fb4067f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -192,22 +192,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

- ret = -ENOMEM;
- mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
- if (unlikely(!mmu_notifier_mm))
- goto out;
-
if (take_mmap_sem)
down_write(&mm->mmap_sem);
ret = mm_take_all_locks(mm);
if (unlikely(ret))
- goto out_cleanup;
+		goto out;

if (!mm_has_notifiers(mm)) {
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+ GFP_ATOMIC);
+ if (unlikely(!mmu_notifier_mm)) {
+ ret = -ENOMEM;
+ goto out_of_mem;
+ }
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
spin_lock_init(&mmu_notifier_mm->lock);
+
mm->mmu_notifier_mm = mmu_notifier_mm;
- mmu_notifier_mm = NULL;
}
	atomic_inc(&mm->mm_count);

@@ -223,13 +224,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

+out_of_mem:
mm_drop_all_locks(mm);
-out_cleanup:
+out:
if (take_mmap_sem)
up_write(&mm->mmap_sem);
- /* kfree() does nothing if mmu_notifier_mm is NULL */
- kfree(mmu_notifier_mm);
-out:
+
BUG_ON(atomic_read(&mm->mm_users) <= 0);
return ret;
}
--
1.7.7.6