From: Laurent Dufour
To: linux-mm@kvack.org
Cc: Davidlohr Bueso, akpm@linux-foundation.org, Jan Kara,
	Kirill A. Shutemov, Michal Hocko, Peter Zijlstra, Mel Gorman,
	haren@linux.vnet.ibm.com, aneesh.kumar@linux.vnet.ibm.com,
	khandual@linux.vnet.ibm.com, Paul.McKenney@us.ibm.com,
	linux-kernel@vger.kernel.org
Subject: [RFC 2/4] Deactivate mmap_sem assert
Date: Wed, 19 Apr 2017 14:18:25 +0200
Message-Id: <582009a3f9459de3d8def1e76db46e815ea6153c.1492595897.git.ldufour@linux.vnet.ibm.com>
X-Mailer: git-send-email 2.7.4

Once mmap_sem is moved to a range lock, some of the assertions done in
the code will no longer be valid, such as the ones checking that
mmap_sem is held. This patch should be reverted later, and some of the
checks should be revisited once the range locking API provides
dedicated services for them.

Signed-off-by: Laurent Dufour
---
A purely illustrative sketch of what such a dedicated assertion service
could look like is appended after the patch.

 arch/x86/events/core.c  |  1 -
 fs/userfaultfd.c        |  6 ------
 include/linux/huge_mm.h |  2 --
 mm/gup.c                |  1 -
 mm/memory.c             | 12 +++---------
 mm/pagewalk.c           |  3 ---
 6 files changed, 3 insertions(+), 22 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f5ac83..86beb42376b8 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2120,7 +2120,6 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
 
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index b83117741b11..5752b3b65638 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -222,8 +222,6 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	pte = huge_pte_offset(mm, address);
 	if (!pte)
 		goto out;
@@ -271,8 +269,6 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto out;
@@ -340,8 +336,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a3762d49ba39..d400014892c7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -161,7 +161,6 @@ extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
@@ -170,7 +169,6 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pud_trans_huge(*pud) || pud_devmap(*pud))
 		return __pud_trans_huge_lock(pud, vma);
 	else
diff --git a/mm/gup.c b/mm/gup.c
index b83b47804c6e..ad83cfa38649 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1040,7 +1040,6 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index 745acb75b3b4..9adb7d4396bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1298,8 +1298,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
-				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				VM_BUG_ON_VMA(vma_is_anonymous(vma), vma);
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
@@ -1334,10 +1333,9 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
-			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+			if (next - addr != HPAGE_PUD_SIZE)
 				split_huge_pud(vma, pud, addr);
-			} else if (zap_huge_pud(tlb, vma, pud, addr))
+			else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;
 			/* fall through */
 		}
@@ -4305,10 +4303,6 @@ void __might_fault(const char *file, int line)
 	if (pagefault_disabled())
 		return;
 	__might_sleep(file, line, 0);
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
-#endif
 }
 EXPORT_SYMBOL(__might_fault);
 #endif
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 60f7856e508f..13429c7815c9 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -293,8 +293,6 @@ int walk_page_range(unsigned long start, unsigned long end,
 	if (!walk->mm)
 		return -EINVAL;
 
-	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
-
 	vma = find_vma(walk->mm, start);
 	do {
 		if (!vma) { /* after the last vma */
@@ -336,7 +334,6 @@ int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
 	if (!walk->mm)
 		return -EINVAL;
 
-	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
 	VM_BUG_ON(!vma);
 	walk->vma = vma;
 	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
-- 
2.7.4
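
For discussion, here is a rough sketch of the kind of dedicated range
lock assertion service mentioned in the changelog, which would let the
checks removed above be reinstated against the range actually being
operated on. It is purely illustrative: struct mm_range, its start/end
fields and mm_range_is_locked() are names assumed for the example only;
they are not defined by this series nor by the current kernel.

/*
 * Illustrative sketch only; not part of this series.  Assumes a
 * hypothetical range lock API providing struct mm_range and
 * mm_range_is_locked().
 */
static inline void mm_assert_range_locked(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	struct mm_range range = {
		.start = start,
		.end   = end,
	};

	/* Range-aware equivalent of the removed rwsem_is_locked() checks. */
	VM_BUG_ON_MM(!mm_range_is_locked(mm, &range), mm);
}

With such a helper, a caller like populate_vma_page_range() could do
mm_assert_range_locked(mm, start, end) instead of asserting that the
whole mmap_sem is held.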