From: Matthew Wilcox
To: Andrew Morton
Cc: Matthew Wilcox, linux-mm@kvack.org, linux-nvdimm@ml01.01.org,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	x86@kernel.org
Subject: [PATCH v5 10/14] dax: Support for transparent PUD pages
Date: Thu, 10 Mar 2016 18:55:27 -0500
Message-Id: <1457654131-4562-11-git-send-email-matthew.r.wilcox@intel.com>
X-Mailer: git-send-email 2.7.0
In-Reply-To: <1457654131-4562-1-git-send-email-matthew.r.wilcox@intel.com>
References: <1457654131-4562-1-git-send-email-matthew.r.wilcox@intel.com>

From: Matthew Wilcox

Add DAX support for transparent huge PUD pages.  The new dax_pud_fault()
follows the same logic as dax_pmd_fault(), one level higher in the page
table hierarchy.

Signed-off-by: Matthew Wilcox
---
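A quick illustration of the PG_PUD_COLOUR arithmetic that the new
function leans on.  This is a standalone userspace sketch, not kernel
code and not part of the patch; the PAGE_SHIFT/PUD_SHIFT values assume
x86-64 geometry (4KiB pages, 1GiB PUDs):

	/*
	 * Illustrative only: the PG_PUD_COLOUR "does the whole PUD fit in
	 * the file?" test, redone in userspace with assumed x86-64 values.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PUD_SHIFT	30
	#define PUD_SIZE	(1UL << PUD_SHIFT)
	#define PG_PUD_COLOUR	((PUD_SIZE >> PAGE_SHIFT) - 1)	/* 0x3ffff */

	int main(void)
	{
		unsigned long pgoff = 0x40000;	/* PUD-aligned page offset (1GiB in) */
		unsigned long size = 0x80000;	/* file size in pages (2GiB) */

		/*
		 * Same test as dax_pud_fault(): (pgoff | PG_PUD_COLOUR) is the
		 * index of the last page the PUD would map, and every page of
		 * the PUD must lie inside the file.
		 */
		if ((pgoff | PG_PUD_COLOUR) >= size)
			printf("fallback: PUD extends past EOF\n");
		else
			printf("ok: pages 0x%lx..0x%lx inside file\n",
			       pgoff, pgoff | PG_PUD_COLOUR);
		return 0;
	}

With 4KiB pages a PUD covers 262144 pages, so the colour mask is
0x3ffff; ORing it into pgoff yields the last page offset the mapping
would cover, which is why the mask "comes up more often than one might
expect" below.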
 fs/dax.c | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 188 insertions(+)

diff --git a/fs/dax.c b/fs/dax.c
index ef46bd8..35f0709 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -977,6 +977,184 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	result = VM_FAULT_FALLBACK;
 	goto out;
 }
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+/*
+ * The 'colour' (ie low bits) within a PUD of a page offset.  This comes up
+ * more often than one might expect in the below function.
+ */
+#define PG_PUD_COLOUR	((PUD_SIZE >> PAGE_SHIFT) - 1)
+
+#define dax_pud_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pud")
+
+#define DAX_PUD_FAULT
+static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+		get_block_t get_block, dax_iodone_t complete_unwritten)
+{
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	struct buffer_head bh;
+	unsigned blkbits = inode->i_blkbits;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	unsigned long pud_addr = address & PUD_MASK;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	struct block_device *bdev;
+	pgoff_t size, pgoff;
+	sector_t block;
+	int result = 0;
+	bool alloc = false;
+
+	/* dax pud mappings require pfn_t_devmap() */
+	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
+		return VM_FAULT_FALLBACK;
+
+	/* Fall back to PTEs if we're going to COW */
+	if (write && !(vma->vm_flags & VM_SHARED)) {
+		split_huge_pud(vma, vmf->pud, address);
+		dax_pud_dbg(NULL, address, "cow write");
+		return VM_FAULT_FALLBACK;
+	}
+	/* If the PUD would extend outside the VMA */
+	if (pud_addr < vma->vm_start) {
+		dax_pud_dbg(NULL, address, "vma start unaligned");
+		return VM_FAULT_FALLBACK;
+	}
+	if ((pud_addr + PUD_SIZE) > vma->vm_end) {
+		dax_pud_dbg(NULL, address, "vma end unaligned");
+		return VM_FAULT_FALLBACK;
+	}
+
+	pgoff = linear_page_index(vma, pud_addr);
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (pgoff >= size)
+		return VM_FAULT_SIGBUS;
+	/* If the PUD would cover blocks out of the file */
+	if ((pgoff | PG_PUD_COLOUR) >= size) {
+		dax_pud_dbg(NULL, address,
+				"offset + huge page size > file size");
+		return VM_FAULT_FALLBACK;
+	}
+
+	memset(&bh, 0, sizeof(bh));
+	bh.b_bdev = inode->i_sb->s_bdev;
+	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
+
+	bh.b_size = PUD_SIZE;
+
+	if (get_block(inode, block, &bh, 0) != 0)
+		return VM_FAULT_SIGBUS;
+
+	if (!buffer_mapped(&bh) && write) {
+		if (get_block(inode, block, &bh, 1) != 0)
+			return VM_FAULT_SIGBUS;
+		alloc = true;
+	}
+
+	bdev = bh.b_bdev;
+
+	/*
+	 * If the filesystem isn't willing to tell us the length of a hole,
+	 * just fall back to PMDs.  Calling get_block 512 times in a loop
+	 * would be silly.
+	 */
+	if (!buffer_size_valid(&bh) || bh.b_size < PUD_SIZE) {
+		dax_pud_dbg(&bh, address, "allocated block too small");
+		return VM_FAULT_FALLBACK;
+	}
+
+	/*
+	 * If we allocated new storage, make sure no process has any
+	 * zero pages covering this hole
+	 */
+	if (alloc) {
+		loff_t lstart = pgoff << PAGE_SHIFT;
+		loff_t lend = lstart + PUD_SIZE - 1; /* inclusive */

+		truncate_pagecache_range(inode, lstart, lend);
+	}
+
+	i_mmap_lock_read(mapping);
+
+	/*
+	 * If a truncate happened while we were allocating blocks, we may
+	 * leave blocks allocated to the file that are beyond EOF.  We can't
+	 * take i_mutex here, so just leave them hanging; they'll be freed
+	 * when the file is deleted.
+	 */
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (pgoff >= size) {
+		result = VM_FAULT_SIGBUS;
+		goto out;
+	}
+	if ((pgoff | PG_PUD_COLOUR) >= size) {
+		dax_pud_dbg(&bh, address, "page extends outside VMA");
+		goto fallback;
+	}
+
+	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
+		dax_pud_dbg(&bh, address, "no zero page");
+		goto fallback;
+	} else {
+		struct blk_dax_ctl dax = {
+			.sector = to_sector(&bh, inode),
+			.size = PUD_SIZE,
+		};
+		long length = dax_map_atomic(bdev, &dax);
+
+		if (length < 0) {
+			result = VM_FAULT_SIGBUS;
+			goto out;
+		}
+		if (length < PUD_SIZE) {
+			dax_pud_dbg(&bh, address, "dax-length too small");
+			dax_unmap_atomic(bdev, &dax);
+			goto fallback;
+		}
+		if (pfn_t_to_pfn(dax.pfn) & PG_PUD_COLOUR) {
+			dax_pud_dbg(&bh, address, "pfn unaligned");
+			dax_unmap_atomic(bdev, &dax);
+			goto fallback;
+		}
+
+		if (!pfn_t_devmap(dax.pfn)) {
+			dax_unmap_atomic(bdev, &dax);
+			dax_pud_dbg(&bh, address, "pfn not in memmap");
+			goto fallback;
+		}
+
+		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+			clear_pmem(dax.addr, PUD_SIZE);
+			wmb_pmem();
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			result |= VM_FAULT_MAJOR;
+		}
+		dax_unmap_atomic(bdev, &dax);
+
+		dev_dbg(part_to_dev(bdev->bd_part),
+			"%s: %s addr: %lx pfn: %lx sect: %llx\n",
+			__func__, current->comm, address,
+			pfn_t_to_pfn(dax.pfn),
+			(unsigned long long) dax.sector);
+		result |= vmf_insert_pfn_pud(vma, address, vmf->pud,
+				dax.pfn, write);
+	}
+
+ out:
+	i_mmap_unlock_read(mapping);
+
+	if (buffer_unwritten(&bh))
+		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
+
+	return result;
+
+ fallback:
+	count_vm_event(THP_FAULT_FALLBACK);
+	result = VM_FAULT_FALLBACK;
+	goto out;
+}
+#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
 static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		get_block_t get_block, dax_iodone_t complete_unwritten)
@@ -985,6 +1163,14 @@ static int dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 }
 #endif /* !CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef DAX_PUD_FAULT
+static int dax_pud_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+		get_block_t get_block, dax_iodone_t complete_unwritten)
+{
+	return VM_FAULT_FALLBACK;
+}
+#endif
+
 /**
  * dax_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
@@ -1009,6 +1195,8 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return dax_pte_fault(vma, vmf, get_block, iodone);
 	case FAULT_FLAG_SIZE_PMD:
 		return dax_pmd_fault(vma, vmf, get_block, iodone);
+	case FAULT_FLAG_SIZE_PUD:
+		return dax_pud_fault(vma, vmf, get_block, iodone);
 	default:
 		return VM_FAULT_FALLBACK;
 	}
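For context, a sketch (not part of this series) of how a filesystem
picks up the new path: dax_fault() dispatches on the fault size, so a
filesystem that already routes its faults through dax_fault() gets PUD
mappings for free.  Names prefixed "example_" below are placeholders,
not real kernel symbols:

	/* Sketch only: wiring dax_fault() into a filesystem's vm_ops. */
	static int example_dax_fault(struct vm_area_struct *vma,
			struct vm_fault *vmf)
	{
		/*
		 * dax_fault() switches on the FAULT_FLAG_SIZE_* bits in
		 * vmf->flags and calls the PTE, PMD or PUD handler;
		 * example_get_block is the filesystem's get_block_t.
		 */
		return dax_fault(vma, vmf, example_get_block, NULL);
	}

	static const struct vm_operations_struct example_dax_vm_ops = {
		.fault	= example_dax_fault,
	};

One caveat visible in the code above: dax_pud_fault() calls
complete_unwritten() unconditionally when the buffer is unwritten, so
passing NULL for the iodone callback is only safe for filesystems whose
get_block never returns unwritten buffers.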
-- 
2.7.0
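Illustrative addendum (not from the original posting): one way to
exercise the new path from userspace.  Assumptions: x86-64 geometry
(1GiB PUDs), a DAX-mounted filesystem at the hypothetical path
/mnt/dax, and a kernel with this series and
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD.  Both the virtual address
and the file offset must be PUD-aligned, and the whole PUD must sit
inside i_size, or dax_pud_fault() falls back:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define PUD_SIZE	(1UL << 30)	/* assumed x86-64 value */

	int main(void)
	{
		/* hypothetical test file on a DAX mount */
		int fd = open("/mnt/dax/bigfile", O_RDWR | O_CREAT, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* the whole PUD must lie inside i_size */
		if (ftruncate(fd, PUD_SIZE) < 0) {
			perror("ftruncate");
			return 1;
		}

		/*
		 * Reserve twice the size, then carve out a PUD-aligned
		 * address inside the reservation, so the virtual address
		 * and file offset 0 are both 1GiB-aligned.
		 */
		void *hint = mmap(NULL, 2 * PUD_SIZE, PROT_NONE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (hint == MAP_FAILED) {
			perror("mmap hint");
			return 1;
		}
		void *aligned = (void *)(((unsigned long)hint + PUD_SIZE - 1)
					 & ~(PUD_SIZE - 1));

		void *p = mmap(aligned, PUD_SIZE, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_FIXED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* the first store faults and, if all checks pass, maps a PUD */
		memset(p, 0xab, PUD_SIZE);
		printf("wrote 1GiB at %p\n", p);
		return 0;
	}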