2021-04-07 14:54:38

by Shiyang Ruan

Subject: [PATCH 0/3] fsdax: Factor helper functions to simplify the code

The page fault part of the fsdax code is a little complex. To make it
easier to understand, and to prepare for adding the CoW feature, it was
suggested that I factor out some helper functions to simplify the
current dax code.

(Rebased on v5.12-rc5)
==

Shiyang Ruan (3):
fsdax: Factor helpers to simplify dax fault code
fsdax: Factor helper: dax_fault_actor()
fsdax: Output address in dax_iomap_pfn() and rename it

fs/dax.c | 438 +++++++++++++++++++++++++++++--------------------------
1 file changed, 232 insertions(+), 206 deletions(-)

--
2.31.0




2021-04-07 14:55:27

by Shiyang Ruan

Subject: [PATCH 1/3] fsdax: Factor helpers to simplify dax fault code

The dax page fault code is long and a bit difficult to read, which
makes it hard to follow when we try to add new features. Some of the
PTE/PMD paths share similar logic, so factor that logic out into helper
functions to simplify the code.
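
For reference, the three helpers introduced here are (signatures as in
the diff below):

        static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn);
        static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
                                      loff_t pos, vm_fault_t *ret);
        static bool dax_fault_check_fallback(struct vm_fault *vmf,
                                             struct xa_state *xas, pgoff_t max_pgoff);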

Signed-off-by: Shiyang Ruan <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Ritesh Harjani <[email protected]>
---
fs/dax.c | 152 ++++++++++++++++++++++++++++++-------------------------
1 file changed, 84 insertions(+), 68 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index b3d27fdc6775..dc75ea04b6d9 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1244,6 +1244,52 @@ static bool dax_fault_is_synchronous(unsigned long flags,
&& (iomap->flags & IOMAP_F_DIRTY);
}

+/*
+ * If we are doing synchronous page fault and inode needs fsync, we can insert
+ * PTE/PMD into page tables only after that happens. Skip insertion for now and
+ * return the pfn so that caller can insert it after fsync is done.
+ */
+static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+{
+ if (WARN_ON_ONCE(!pfnp))
+ return VM_FAULT_SIGBUS;
+
+ *pfnp = pfn;
+ return VM_FAULT_NEEDDSYNC;
+}
+
+static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
+ loff_t pos, vm_fault_t *ret)
+{
+ int error = 0;
+ unsigned long vaddr = vmf->address;
+ sector_t sector = dax_iomap_sector(iomap, pos);
+
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ case IOMAP_UNWRITTEN:
+ clear_user_highpage(vmf->cow_page, vaddr);
+ break;
+ case IOMAP_MAPPED:
+ error = copy_cow_page_dax(iomap->bdev, iomap->dax_dev,
+ sector, vmf->cow_page, vaddr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ break;
+ }
+
+ if (error)
+ return error;
+
+ __SetPageUptodate(vmf->cow_page);
+ *ret = finish_fault(vmf);
+ if (!*ret)
+ *ret = VM_FAULT_DONE_COW;
+ return 0;
+}
+
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
int *iomap_errp, const struct iomap_ops *ops)
{
@@ -1312,30 +1358,9 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
}

if (vmf->cow_page) {
- sector_t sector = dax_iomap_sector(&iomap, pos);
-
- switch (iomap.type) {
- case IOMAP_HOLE:
- case IOMAP_UNWRITTEN:
- clear_user_highpage(vmf->cow_page, vaddr);
- break;
- case IOMAP_MAPPED:
- error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
- sector, vmf->cow_page, vaddr);
- break;
- default:
- WARN_ON_ONCE(1);
- error = -EIO;
- break;
- }
-
+ error = dax_fault_cow_page(vmf, &iomap, pos, &ret);
if (error)
- goto error_finish_iomap;
-
- __SetPageUptodate(vmf->cow_page);
- ret = finish_fault(vmf);
- if (!ret)
- ret = VM_FAULT_DONE_COW;
+ ret = dax_fault_return(error);
goto finish_iomap;
}

@@ -1355,19 +1380,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
0, write && !sync);

- /*
- * If we are doing synchronous page fault and inode needs fsync,
- * we can insert PTE into page tables only after that happens.
- * Skip insertion for now and return the pfn so that caller can
- * insert it after fsync is done.
- */
if (sync) {
- if (WARN_ON_ONCE(!pfnp)) {
- error = -EIO;
- goto error_finish_iomap;
- }
- *pfnp = pfn;
- ret = VM_FAULT_NEEDDSYNC | major;
+ ret = dax_fault_synchronous_pfnp(pfnp, pfn);
goto finish_iomap;
}
trace_dax_insert_mapping(inode, vmf, entry);
@@ -1466,13 +1480,45 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}

+static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
+ pgoff_t max_pgoff)
+{
+ unsigned long pmd_addr = vmf->address & PMD_MASK;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ /*
+ * Make sure that the faulting address's PMD offset (color) matches
+ * the PMD offset from the start of the file. This is necessary so
+ * that a PMD range in the page table overlaps exactly with a PMD
+ * range in the page cache.
+ */
+ if ((vmf->pgoff & PG_PMD_COLOUR) !=
+ ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+ return true;
+
+ /* Fall back to PTEs if we're going to COW */
+ if (write && !(vmf->vma->vm_flags & VM_SHARED))
+ return true;
+
+ /* If the PMD would extend outside the VMA */
+ if (pmd_addr < vmf->vma->vm_start)
+ return true;
+ if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+ return true;
+
+ /* If the PMD would extend beyond the file size */
+ if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
+ return true;
+
+ return false;
+}
+
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
const struct iomap_ops *ops)
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
- unsigned long pmd_addr = vmf->address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool sync;
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
@@ -1495,33 +1541,12 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,

trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

- /*
- * Make sure that the faulting address's PMD offset (color) matches
- * the PMD offset from the start of the file. This is necessary so
- * that a PMD range in the page table overlaps exactly with a PMD
- * range in the page cache.
- */
- if ((vmf->pgoff & PG_PMD_COLOUR) !=
- ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
- goto fallback;
-
- /* Fall back to PTEs if we're going to COW */
- if (write && !(vma->vm_flags & VM_SHARED))
- goto fallback;
-
- /* If the PMD would extend outside the VMA */
- if (pmd_addr < vma->vm_start)
- goto fallback;
- if ((pmd_addr + PMD_SIZE) > vma->vm_end)
- goto fallback;
-
if (xas.xa_index >= max_pgoff) {
result = VM_FAULT_SIGBUS;
goto out;
}

- /* If the PMD would extend beyond the file size */
- if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
+ if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
goto fallback;

/*
@@ -1573,17 +1598,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
DAX_PMD, write && !sync);

- /*
- * If we are doing synchronous page fault and inode needs fsync,
- * we can insert PMD into page tables only after that happens.
- * Skip insertion for now and return the pfn so that caller can
- * insert it after fsync is done.
- */
if (sync) {
- if (WARN_ON_ONCE(!pfnp))
- goto finish_iomap;
- *pfnp = pfn;
- result = VM_FAULT_NEEDDSYNC;
+ result = dax_fault_synchronous_pfnp(pfnp, pfn);
goto finish_iomap;
}

--
2.31.0



2021-04-07 19:24:18

by Shiyang Ruan

Subject: [PATCH 2/3] fsdax: Factor helper: dax_fault_actor()

The core logic of the two dax page fault handlers is similar, so move
it into a common helper function, dax_fault_actor(). Also, to make it
easier to add new features such as CoW, stop using a switch-case to
handle the different iomap types.
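
As an illustration of why this helps, a later CoW change could slot its
copy step straight into dax_fault_actor() without reworking the flow.
This is only a sketch; the copy helper named below is made up and not
part of this series:

        err = dax_iomap_pfn(iomap, pos, size, &pfn);
        if (err)
                return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

        /* hypothetical CoW step: srcmap differing from iomap means the
         * old data must be copied into the new extent before mapping */
        if (write && srcmap->type != IOMAP_HOLE) {
                err = dax_fault_cow_copy(vmf, iomap, srcmap, pos, size);
                if (err)
                        return dax_fault_return(err);
        }

        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
                        write && !sync);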

Signed-off-by: Shiyang Ruan <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
---
fs/dax.c | 294 ++++++++++++++++++++++++++++---------------------------
1 file changed, 148 insertions(+), 146 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index dc75ea04b6d9..19fa22ab50fa 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1054,6 +1054,66 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
return ret;
}

+#ifdef CONFIG_FS_DAX_PMD
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+ struct iomap *iomap, void **entry)
+{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+ unsigned long pmd_addr = vmf->address & PMD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
+ struct inode *inode = mapping->host;
+ pgtable_t pgtable = NULL;
+ struct page *zero_page;
+ spinlock_t *ptl;
+ pmd_t pmd_entry;
+ pfn_t pfn;
+
+ zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+
+ if (unlikely(!zero_page))
+ goto fallback;
+
+ pfn = page_to_pfn_t(zero_page);
+ *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+ DAX_PMD | DAX_ZERO_PAGE, false);
+
+ if (arch_needs_pgtable_deposit()) {
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (!pgtable)
+ return VM_FAULT_OOM;
+ }
+
+ ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+ if (!pmd_none(*(vmf->pmd))) {
+ spin_unlock(ptl);
+ goto fallback;
+ }
+
+ if (pgtable) {
+ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+ mm_inc_nr_ptes(vma->vm_mm);
+ }
+ pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+ pmd_entry = pmd_mkhuge(pmd_entry);
+ set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
+ spin_unlock(ptl);
+ trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+ return VM_FAULT_NOPAGE;
+
+fallback:
+ if (pgtable)
+ pte_free(vma->vm_mm, pgtable);
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+ return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+ struct iomap *iomap, void **entry)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif /* CONFIG_FS_DAX_PMD */
+
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
@@ -1290,6 +1350,64 @@ static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
return 0;
}

+/**
+ * dax_fault_actor - Common actor to handle pfn insertion in PTE/PMD fault.
+ * @vmf: vm fault instance
+ * @pfnp: pfn to be returned
+ * @xas: the dax mapping tree of a file
+ * @entry: an unlocked dax entry to be inserted
+ * @pmd: distinguish whether it is a pmd fault
+ * @flags: iomap flags
+ * @iomap: from iomap_begin()
+ * @srcmap: from iomap_begin(), not equal to iomap if it is a CoW
+ */
+static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
+ struct xa_state *xas, void **entry, bool pmd,
+ unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
+{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+ size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
+ loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+ bool sync = dax_fault_is_synchronous(flags, vmf->vma, iomap);
+ unsigned long entry_flags = pmd ? DAX_PMD : 0;
+ int err = 0;
+ pfn_t pfn;
+
+ /* if we are reading UNWRITTEN and HOLE, return a hole. */
+ if (!write &&
+ (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
+ if (!pmd)
+ return dax_load_hole(xas, mapping, entry, vmf);
+ else
+ return dax_pmd_load_hole(xas, vmf, iomap, entry);
+ }
+
+ if (iomap->type != IOMAP_MAPPED) {
+ WARN_ON_ONCE(1);
+ return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
+ }
+
+ err = dax_iomap_pfn(iomap, pos, size, &pfn);
+ if (err)
+ return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
+
+ *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
+ write && !sync);
+
+ if (sync)
+ return dax_fault_synchronous_pfnp(pfnp, pfn);
+
+ /* insert PMD pfn */
+ if (pmd)
+ return vmf_insert_pfn_pmd(vmf, pfn, write);
+
+ /* insert PTE pfn */
+ if (write)
+ return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+ return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+}
+
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
int *iomap_errp, const struct iomap_ops *ops)
{
@@ -1297,17 +1415,14 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct address_space *mapping = vma->vm_file->f_mapping;
XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
struct inode *inode = mapping->host;
- unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
struct iomap iomap = { .type = IOMAP_HOLE };
struct iomap srcmap = { .type = IOMAP_HOLE };
unsigned flags = IOMAP_FAULT;
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool sync;
vm_fault_t ret = 0;
void *entry;
- pfn_t pfn;

trace_dax_pte_fault(inode, vmf, ret);
/*
@@ -1353,8 +1468,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
goto unlock_entry;
}
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
- error = -EIO; /* fs corruption? */
- goto error_finish_iomap;
+ ret = VM_FAULT_SIGBUS; /* fs corruption? */
+ goto finish_iomap;
}

if (vmf->cow_page) {
@@ -1364,49 +1479,19 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
goto finish_iomap;
}

- sync = dax_fault_is_synchronous(flags, vma, &iomap);
-
- switch (iomap.type) {
- case IOMAP_MAPPED:
- if (iomap.flags & IOMAP_F_NEW) {
- count_vm_event(PGMAJFAULT);
- count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
- major = VM_FAULT_MAJOR;
- }
- error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
- if (error < 0)
- goto error_finish_iomap;
-
- entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
- 0, write && !sync);
-
- if (sync) {
- ret = dax_fault_synchronous_pfnp(pfnp, pfn);
- goto finish_iomap;
- }
- trace_dax_insert_mapping(inode, vmf, entry);
- if (write)
- ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
- else
- ret = vmf_insert_mixed(vma, vaddr, pfn);
-
+ ret = dax_fault_actor(vmf, pfnp, &xas, &entry, false, flags,
+ &iomap, &srcmap);
+ if (ret == VM_FAULT_SIGBUS)
goto finish_iomap;
- case IOMAP_UNWRITTEN:
- case IOMAP_HOLE:
- if (!write) {
- ret = dax_load_hole(&xas, mapping, &entry, vmf);
- goto finish_iomap;
- }
- fallthrough;
- default:
- WARN_ON_ONCE(1);
- error = -EIO;
- break;
+
+ /* read/write MAPPED, CoW UNWRITTEN */
+ if (iomap.flags & IOMAP_F_NEW) {
+ count_vm_event(PGMAJFAULT);
+ count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+ major = VM_FAULT_MAJOR;
}

- error_finish_iomap:
- ret = dax_fault_return(error);
- finish_iomap:
+finish_iomap:
if (ops->iomap_end) {
int copied = PAGE_SIZE;

@@ -1420,66 +1505,14 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
*/
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
}
- unlock_entry:
+unlock_entry:
dax_unlock_entry(&xas, entry);
- out:
+out:
trace_dax_pte_fault_done(inode, vmf, ret);
return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
-static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
- struct iomap *iomap, void **entry)
-{
- struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- unsigned long pmd_addr = vmf->address & PMD_MASK;
- struct vm_area_struct *vma = vmf->vma;
- struct inode *inode = mapping->host;
- pgtable_t pgtable = NULL;
- struct page *zero_page;
- spinlock_t *ptl;
- pmd_t pmd_entry;
- pfn_t pfn;
-
- zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
-
- if (unlikely(!zero_page))
- goto fallback;
-
- pfn = page_to_pfn_t(zero_page);
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
- DAX_PMD | DAX_ZERO_PAGE, false);
-
- if (arch_needs_pgtable_deposit()) {
- pgtable = pte_alloc_one(vma->vm_mm);
- if (!pgtable)
- return VM_FAULT_OOM;
- }
-
- ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
- if (!pmd_none(*(vmf->pmd))) {
- spin_unlock(ptl);
- goto fallback;
- }
-
- if (pgtable) {
- pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
- mm_inc_nr_ptes(vma->vm_mm);
- }
- pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
- pmd_entry = pmd_mkhuge(pmd_entry);
- set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
- spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
- return VM_FAULT_NOPAGE;
-
-fallback:
- if (pgtable)
- pte_free(vma->vm_mm, pgtable);
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
- return VM_FAULT_FALLBACK;
-}
-
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
pgoff_t max_pgoff)
{
@@ -1520,17 +1553,15 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct address_space *mapping = vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool sync;
- unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
+ unsigned int flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
struct inode *inode = mapping->host;
- vm_fault_t result = VM_FAULT_FALLBACK;
+ vm_fault_t ret = VM_FAULT_FALLBACK;
struct iomap iomap = { .type = IOMAP_HOLE };
struct iomap srcmap = { .type = IOMAP_HOLE };
pgoff_t max_pgoff;
void *entry;
loff_t pos;
int error;
- pfn_t pfn;

/*
* Check whether offset isn't beyond end of file now. Caller is
@@ -1542,7 +1573,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

if (xas.xa_index >= max_pgoff) {
- result = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out;
}

@@ -1557,7 +1588,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
*/
entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
if (xa_is_internal(entry)) {
- result = xa_to_internal(entry);
+ ret = xa_to_internal(entry);
goto fallback;
}

@@ -1569,7 +1600,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
*/
if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
!pmd_devmap(*vmf->pmd)) {
- result = 0;
+ ret = 0;
goto unlock_entry;
}

@@ -1579,49 +1610,21 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* to look up our filesystem block.
*/
pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
- &srcmap);
+ error = ops->iomap_begin(inode, pos, PMD_SIZE, flags, &iomap, &srcmap);
if (error)
goto unlock_entry;

if (iomap.offset + iomap.length < pos + PMD_SIZE)
goto finish_iomap;

- sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
-
- switch (iomap.type) {
- case IOMAP_MAPPED:
- error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
- if (error < 0)
- goto finish_iomap;
+ ret = dax_fault_actor(vmf, pfnp, &xas, &entry, true, flags,
+ &iomap, &srcmap);

- entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
- DAX_PMD, write && !sync);
-
- if (sync) {
- result = dax_fault_synchronous_pfnp(pfnp, pfn);
- goto finish_iomap;
- }
-
- trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
- result = vmf_insert_pfn_pmd(vmf, pfn, write);
- break;
- case IOMAP_UNWRITTEN:
- case IOMAP_HOLE:
- if (WARN_ON_ONCE(write))
- break;
- result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
- break;
- default:
- WARN_ON_ONCE(1);
- break;
- }
-
- finish_iomap:
+finish_iomap:
if (ops->iomap_end) {
int copied = PMD_SIZE;

- if (result == VM_FAULT_FALLBACK)
+ if (ret == VM_FAULT_FALLBACK)
copied = 0;
/*
* The fault is done by now and there's no way back (other
@@ -1629,19 +1632,18 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* Just ignore error from ->iomap_end since we cannot do much
* with it.
*/
- ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
- &iomap);
+ ops->iomap_end(inode, pos, PMD_SIZE, copied, flags, &iomap);
}
- unlock_entry:
+unlock_entry:
dax_unlock_entry(&xas, entry);
- fallback:
- if (result == VM_FAULT_FALLBACK) {
+fallback:
+ if (ret == VM_FAULT_FALLBACK) {
split_huge_pmd(vma, vmf->pmd, vmf->address);
count_vm_event(THP_FAULT_FALLBACK);
}
out:
- trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
- return result;
+ trace_dax_pmd_fault_done(inode, vmf, max_pgoff, ret);
+ return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
--
2.31.0



2021-04-07 19:25:33

by Shiyang Ruan

Subject: [PATCH 3/3] fsdax: Output address in dax_iomap_pfn() and rename it

Add a kernel address output parameter to dax_iomap_pfn() so that a
memcpy() can be performed in the CoW case. Since the function now
outputs both the address and the pfn, rename it to
dax_iomap_direct_access().
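
A CoW caller would then be able to do something along these lines
(sketch only; dst_kaddr and the surrounding CoW logic are hypothetical
and not part of this patch):

        void *kaddr;
        int error;

        /* the pfn is not needed for the copy, so pass NULL for pfnp and
         * ask only for the kernel address of the source extent */
        error = dax_iomap_direct_access(&srcmap, pos, size, &kaddr, NULL);
        if (error)
                return dax_fault_return(error);
        /* copy the old contents into the newly allocated extent */
        memcpy(dst_kaddr, kaddr, size);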

Signed-off-by: Shiyang Ruan <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Ritesh Harjani <[email protected]>
---
fs/dax.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 19fa22ab50fa..ec66207a3199 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -998,8 +998,8 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

-static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
- pfn_t *pfnp)
+static int dax_iomap_direct_access(struct iomap *iomap, loff_t pos, size_t size,
+ void **kaddr, pfn_t *pfnp)
{
const sector_t sector = dax_iomap_sector(iomap, pos);
pgoff_t pgoff;
@@ -1011,11 +1011,13 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
return rc;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- NULL, pfnp);
+ kaddr, pfnp);
if (length < 0) {
rc = length;
goto out;
}
+ if (!pfnp)
+ goto out_check_addr;
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
@@ -1025,6 +1027,12 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
if (length > 1 && !pfn_t_devmap(*pfnp))
goto out;
rc = 0;
+
+out_check_addr:
+ if (!kaddr)
+ goto out;
+ if (!*kaddr)
+ rc = -EFAULT;
out:
dax_read_unlock(id);
return rc;
@@ -1388,7 +1396,7 @@ static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
}

- err = dax_iomap_pfn(iomap, pos, size, &pfn);
+ err = dax_iomap_direct_access(iomap, pos, size, NULL, &pfn);
if (err)
return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

--
2.31.0



2021-04-07 20:48:45

by Ritesh Harjani

Subject: Re: [PATCH 2/3] fsdax: Factor helper: dax_fault_actor()

On 21/04/07 02:32PM, Shiyang Ruan wrote:
> The core logic of the two dax page fault handlers is similar, so move
> it into a common helper function, dax_fault_actor(). Also, to make it
> easier to add new features such as CoW, stop using a switch-case to
> handle the different iomap types.
>
> Signed-off-by: Shiyang Ruan <[email protected]>
> Reviewed-by: Christoph Hellwig <[email protected]>
> ---
> fs/dax.c | 294 ++++++++++++++++++++++++++++---------------------------
> 1 file changed, 148 insertions(+), 146 deletions(-)

Thanks for addressing the comments.
A note in the cover letter acknowledging that the review comments
mentioned here [1] were addressed would be helpful for everyone.

Please feel free to add.
Reviewed-by: Ritesh Harjani <[email protected]>

[1]: https://patchwork.kernel.org/project/linux-nvdimm/patch/[email protected]/

-ritesh

2021-04-07 20:50:00

by Ritesh Harjani

Subject: Re: [PATCH 1/3] fsdax: Factor helpers to simplify dax fault code

On 21/04/07 02:32PM, Shiyang Ruan wrote:
> The dax page fault code is long and a bit difficult to read, which
> makes it hard to follow when we try to add new features. Some of the
> PTE/PMD paths share similar logic, so factor that logic out into
> helper functions to simplify the code.
>
> Signed-off-by: Shiyang Ruan <[email protected]>
> Reviewed-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Ritesh Harjani <[email protected]>

Sorry, but the above email address is wrong. Either of the following is fine.

Reviewed-by: Ritesh Harjani <[email protected]>
OR
Reviewed-by: Ritesh Harjani <[email protected]>

>
>
> ---
> fs/dax.c | 152 ++++++++++++++++++++++++++++++-------------------------
> 1 file changed, 84 insertions(+), 68 deletions(-)


2021-04-07 20:54:32

by Matthew Wilcox

Subject: Re: [PATCH 1/3] fsdax: Factor helpers to simplify dax fault code

On Wed, Apr 07, 2021 at 02:32:05PM +0800, Shiyang Ruan wrote:
> +static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
> + loff_t pos, vm_fault_t *ret)
> +{
> + int error = 0;
> + unsigned long vaddr = vmf->address;
> + sector_t sector = dax_iomap_sector(iomap, pos);
> +
> + switch (iomap->type) {
> + case IOMAP_HOLE:
> + case IOMAP_UNWRITTEN:
> + clear_user_highpage(vmf->cow_page, vaddr);
> + break;
> + case IOMAP_MAPPED:
> + error = copy_cow_page_dax(iomap->bdev, iomap->dax_dev,
> + sector, vmf->cow_page, vaddr);
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + error = -EIO;
> + break;
> + }
> +
> + if (error)
> + return error;
> +
> + __SetPageUptodate(vmf->cow_page);
> + *ret = finish_fault(vmf);
> + if (!*ret)
> + *ret = VM_FAULT_DONE_COW;
> + return 0;
> +}
...

> + error = dax_fault_cow_page(vmf, &iomap, pos, &ret);
> if (error)
> + ret = dax_fault_return(error);
> goto finish_iomap;

This seems unnecessarily complex. Why not return the vm_fault_t instead of
returning the errno and then converting it?

2021-04-07 20:58:15

by Shiyang Ruan

Subject: RE: [PATCH 1/3] fsdax: Factor helpers to simplify dax fault code



> -----Original Message-----
> From: Matthew Wilcox <[email protected]>
> Sent: Wednesday, April 7, 2021 7:14 PM
> To: Ruan, Shiyang <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; Ritesh Harjani <[email protected]>
> Subject: Re: [PATCH 1/3] fsdax: Factor helpers to simplify dax fault code
>
> On Wed, Apr 07, 2021 at 02:32:05PM +0800, Shiyang Ruan wrote:
> > +static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
> > + loff_t pos, vm_fault_t *ret)
> > +{
> > + int error = 0;
> > + unsigned long vaddr = vmf->address;
> > + sector_t sector = dax_iomap_sector(iomap, pos);
> > +
> > + switch (iomap->type) {
> > + case IOMAP_HOLE:
> > + case IOMAP_UNWRITTEN:
> > + clear_user_highpage(vmf->cow_page, vaddr);
> > + break;
> > + case IOMAP_MAPPED:
> > + error = copy_cow_page_dax(iomap->bdev, iomap->dax_dev,
> > + sector, vmf->cow_page, vaddr);
> > + break;
> > + default:
> > + WARN_ON_ONCE(1);
> > + error = -EIO;
> > + break;
> > + }
> > +
> > + if (error)
> > + return error;
> > +
> > + __SetPageUptodate(vmf->cow_page);
> > + *ret = finish_fault(vmf);
> > + if (!*ret)
> > + *ret = VM_FAULT_DONE_COW;
> > + return 0;
> > +}
> ...
>
> > + error = dax_fault_cow_page(vmf, &iomap, pos, &ret);
> > if (error)
> > + ret = dax_fault_return(error);
> > goto finish_iomap;
>
> This seems unnecessarily complex. Why not return the vm_fault_t instead of
> returning the errno and then converting it?

Yes, I'll fix it.
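
Probably by letting the helper return the vm_fault_t directly and
having the caller just goto finish_iomap. Roughly (untested sketch):

        static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
                        struct iomap *iomap, loff_t pos)
        {
                unsigned long vaddr = vmf->address;
                sector_t sector = dax_iomap_sector(iomap, pos);
                vm_fault_t ret;
                int error = 0;

                switch (iomap->type) {
                case IOMAP_HOLE:
                case IOMAP_UNWRITTEN:
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
                        error = copy_cow_page_dax(iomap->bdev, iomap->dax_dev,
                                                  sector, vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                }

                if (error)
                        return dax_fault_return(error);

                __SetPageUptodate(vmf->cow_page);
                ret = finish_fault(vmf);
                if (!ret)
                        ret = VM_FAULT_DONE_COW;
                return ret;
        }

and in the caller:

                ret = dax_fault_cow_page(vmf, &iomap, pos);
                goto finish_iomap;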


--
Thanks,
Ruan Shiyang.

2021-04-07 22:36:33

by Ritesh Harjani

Subject: Re: [PATCH 3/3] fsdax: Output address in dax_iomap_pfn() and rename it

On 21/04/07 02:32PM, Shiyang Ruan wrote:
> Add a kernel address output parameter to dax_iomap_pfn() so that a
> memcpy() can be performed in the CoW case. Since the function now
> outputs both the address and the pfn, rename it to
> dax_iomap_direct_access().
>
> Signed-off-by: Shiyang Ruan <[email protected]>
> Reviewed-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Ritesh Harjani <[email protected]>

Same here. It should be either of the following.

Reviewed-by: Ritesh Harjani <[email protected]>
OR
Reviewed-by: Ritesh Harjani <[email protected]>

> ---
> fs/dax.c | 16 ++++++++++++----
> 1 file changed, 12 insertions(+), 4 deletions(-)