From: Chen Gang <xili_gchen_5257@hotmail.com>
To: Andrew Morton, "mhocko@suse.cz"
CC: Linux Memory, kernel mailing list
Subject: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
Date: Tue, 1 Sep 2015 04:54:40 +0800

When failure occurs, we need not call khugepaged_enter_vma_merge() or
validate_mm().

Also simplify do_munmap(): declare 'error' once instead of twice in
sub-blocks.

Signed-off-by: Chen Gang <xili_gchen_5257@hotmail.com>
---
 mm/mmap.c | 116 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 58 insertions(+), 58 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index df6d5f0..d32199a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2182,10 +2182,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	if (address < PAGE_ALIGN(address+4))
 		address = PAGE_ALIGN(address+4);
 	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto err;
 	}
-	error = 0;
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2194,38 +2193,39 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		size = address - vma->vm_start;
 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-		error = -ENOMEM;
-		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow);
-			if (!error) {
-				/*
-				 * vma_gap_update() doesn't support concurrent
-				 * updates, but we only hold a shared mmap_sem
-				 * lock here, so we need to protect against
-				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
-				 * we don't guarantee that all growable vmas
-				 * in a mm share the same root anon vma.
-				 * So, we reuse mm->page_table_lock to guard
-				 * against concurrent vma expansions.
-				 */
-				spin_lock(&vma->vm_mm->page_table_lock);
-				anon_vma_interval_tree_pre_update_vma(vma);
-				vma->vm_end = address;
-				anon_vma_interval_tree_post_update_vma(vma);
-				if (vma->vm_next)
-					vma_gap_update(vma->vm_next);
-				else
-					vma->vm_mm->highest_vm_end = address;
-				spin_unlock(&vma->vm_mm->page_table_lock);
-
-				perf_event_mmap(vma);
-			}
+		if (vma->vm_pgoff + (size >> PAGE_SHIFT) < vma->vm_pgoff) {
+			error = -ENOMEM;
+			goto err;
 		}
+		error = acct_stack_growth(vma, size, grow);
+		if (error)
+			goto err;
+		/*
+		 * vma_gap_update() doesn't support concurrent updates, but we
+		 * only hold a shared mmap_sem lock here, so we need to protect
+		 * against concurrent vma expansions. vma_lock_anon_vma()
+		 * doesn't help here, as we don't guarantee that all growable
+		 * vmas in a mm share the same root anon vma. So, we reuse mm->
+		 * page_table_lock to guard against concurrent vma expansions.
+		 */
+		spin_lock(&vma->vm_mm->page_table_lock);
+		anon_vma_interval_tree_pre_update_vma(vma);
+		vma->vm_end = address;
+		anon_vma_interval_tree_post_update_vma(vma);
+		if (vma->vm_next)
+			vma_gap_update(vma->vm_next);
+		else
+			vma->vm_mm->highest_vm_end = address;
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+		perf_event_mmap(vma);
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
+	return 0;
+err:
+	vma_unlock_anon_vma(vma);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2265,36 +2265,37 @@ int expand_downwards(struct vm_area_struct *vma,
 		size = vma->vm_end - address;
 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-		error = -ENOMEM;
-		if (grow <= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow);
-			if (!error) {
-				/*
-				 * vma_gap_update() doesn't support concurrent
-				 * updates, but we only hold a shared mmap_sem
-				 * lock here, so we need to protect against
-				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
-				 * we don't guarantee that all growable vmas
-				 * in a mm share the same root anon vma.
-				 * So, we reuse mm->page_table_lock to guard
-				 * against concurrent vma expansions.
-				 */
-				spin_lock(&vma->vm_mm->page_table_lock);
-				anon_vma_interval_tree_pre_update_vma(vma);
-				vma->vm_start = address;
-				vma->vm_pgoff -= grow;
-				anon_vma_interval_tree_post_update_vma(vma);
-				vma_gap_update(vma);
-				spin_unlock(&vma->vm_mm->page_table_lock);
-
-				perf_event_mmap(vma);
-			}
+		if (grow > vma->vm_pgoff) {
+			error = -ENOMEM;
+			goto err;
 		}
+		error = acct_stack_growth(vma, size, grow);
+		if (error)
+			goto err;
+		/*
+		 * vma_gap_update() doesn't support concurrent updates, but we
+		 * only hold a shared mmap_sem lock here, so we need to protect
+		 * against concurrent vma expansions. vma_lock_anon_vma()
+		 * doesn't help here, as we don't guarantee that all growable
+		 * vmas in a mm share the same root anon vma. So, we reuse mm->
+		 * page_table_lock to guard against concurrent vma expansions.
+		 */
+		spin_lock(&vma->vm_mm->page_table_lock);
+		anon_vma_interval_tree_pre_update_vma(vma);
+		vma->vm_start = address;
+		vma->vm_pgoff -= grow;
+		anon_vma_interval_tree_post_update_vma(vma);
+		vma_gap_update(vma);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+		perf_event_mmap(vma);
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
+	return 0;
+err:
+	vma_unlock_anon_vma(vma);
 	return error;
 }
 
@@ -2542,6 +2543,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
 	unsigned long end;
 	struct vm_area_struct *vma, *prev, *last;
+	int error;
 
 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
 		return -EINVAL;
@@ -2570,8 +2572,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	 * places tmp vma above, and higher split_vma places tmp vma below.
 	 */
 	if (start > vma->vm_start) {
-		int error;
-
 		/*
 		 * Make sure that map_count on return from munmap() will
 		 * not exceed its limit; but let map_count go just above
@@ -2589,7 +2589,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* Does it split the last one? */
 	last = find_vma(mm, end);
 	if (last && end > last->vm_start) {
-		int error = __split_vma(mm, last, end, 1);
+		error = __split_vma(mm, last, end, 1);
 		if (error)
 			return error;
 	}
-- 
1.9.3
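
For reference, the control flow this patch gives expand_upwards() and
expand_downwards() is the usual kernel single-exit pattern: the success path
falls through to "return 0" after the unlock/merge/validate calls, and every
failure jumps to a label that only drops the lock before returning the error.
A minimal standalone sketch of that shape, using hypothetical names and a
plain userspace mutex rather than the actual mm code:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for acct_stack_growth() and the post-success calls. */
static int check_growth(long grow)
{
	return (grow < 0) ? -1 : 0;
}

static void post_success_work(void)
{
	puts("merge + validate (success path only)");
}

static int expand_demo(long grow)
{
	int error;

	pthread_mutex_lock(&demo_lock);		/* plays the role of vma_lock_anon_vma() */

	if (grow > 1024) {			/* early bail-out, like the overflow check */
		error = -1;
		goto err;
	}
	error = check_growth(grow);
	if (error)
		goto err;

	pthread_mutex_unlock(&demo_lock);
	post_success_work();			/* skipped entirely on failure, as in the patch */
	return 0;
err:
	pthread_mutex_unlock(&demo_lock);	/* failure path: unlock only */
	return error;
}

int main(void)
{
	printf("ok: %d, fail: %d\n", expand_demo(8), expand_demo(-1));
	return 0;
}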