2022-12-20 07:51:54

by Chih-En Lin

Subject: [PATCH v3 03/14] mm: Add break COW PTE fault and helper functions

Add handle_cow_pte_fault() to break (unshare) a COW-ed PTE table when a
page fault is going to modify either the PTE table itself or a page
mapped by it (i.e., write, unshare, and file-read faults).

When breaking COW PTE, first check the COW-ed PTE table's refcount and
try to reuse it. If the COW-ed PTE table cannot be reused, allocate a
new PTE table and duplicate all of the pte entries from the COW-ed one.
Moreover, flush the TLB whenever the write protection of the PTE table
changes.
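
Roughly, the fast/slow path split in handle_cow_pte_fault() looks like
the following sketch (simplified for illustration only: error handling,
mmu notifiers, the seqcount, page-table accounting, and reusing the old
table when its refcount drops back to one are all omitted;
cow_pte_count() and pmd_put_pte() come from the earlier patches of this
series):

    /* Break COW for the whole PTE table that maps @addr. */
    addr = start = addr & PMD_MASK;
    end = start + PMD_SIZE;

    if (cow_pte_count(pmd) == 1) {
        /* Fast path: we are the only user left, make the pmd entry writable again. */
        set_pmd_at(mm, addr, pmd, pmd_mkwrite(*pmd));
    } else {
        /* Slow path: allocate a private PTE table and clone the shared entries. */
        pmd_t cowed_entry = READ_ONCE(*pmd);

        pmd_clear(pmd);
        orig_dst_pte = dst_pte = pte_alloc_map_lock(mm, pmd, addr, &dst_ptl);
        orig_src_pte = src_pte = pte_offset_map_lock(mm, &cowed_entry, addr, &src_ptl);
        do {
            if (!pte_none(*src_pte))
                set_pte_at(mm, addr, dst_pte, *src_pte);
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        /* Drop this task's reference on the shared (COW-ed) PTE table. */
        pmd_put_pte(&cowed_entry);
        pte_unmap_unlock(orig_src_pte, src_ptl);
    }
    /* The pmd entry changed either way, so flush the TLB for the range. */
    flush_tlb_range(vma, start, end);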

In addition, provide the helper functions break_cow_pte{,_range}() so
that other code paths (mremap, THP, migration, swapfile, etc.) can use
them.
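
Callers that are about to modify pte entries directly use the helpers
as in this minimal sketch (mirroring the call sites added to mremap,
mmap, swapfile, and remap_pfn_range() below):

    /* Make the PTE table that @pmd maps exclusive before touching it. */
    if (break_cow_pte(vma, pmd, addr) < 0)
        return -ENOMEM;

    /* Or break a whole range; the pmd entries are looked up internally. */
    if (break_cow_pte_range(vma, start, end))
        return -ENOMEM;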

Signed-off-by: Chih-En Lin <[email protected]>
---
include/linux/mm.h | 4 +
include/linux/pgtable.h | 6 +
mm/memory.c | 319 +++++++++++++++++++++++++++++++++++++++-
mm/mmap.c | 4 +
mm/mremap.c | 2 +
mm/swapfile.c | 2 +
6 files changed, 331 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8c6ec1da2336f..6a0eb01ee6f7e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1894,6 +1894,10 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_page(struct address_space *mapping, struct page *page);

+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a108b60a6962b..895fa18e3b011 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1395,6 +1395,12 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
return 1;
+ /*
+ * A COW-ed PTE table has a write-protected pmd entry, which can
+ * trigger pmd_bad(). To avoid that, return if the entry is write-protected.
+ */
+ if (!pmd_write(pmdval))
+ return 0;
if (unlikely(pmd_bad(pmdval))) {
pmd_clear_bad(pmd);
return 1;
diff --git a/mm/memory.c b/mm/memory.c
index 5b474d14a5411..8ebff4cac2191 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -239,6 +239,35 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+ /*
+ * For a COW-ed PTE table, the pte entries may still map pages.
+ * However, all of them have already been de-accounted. So, even
+ * if the refcount here is not the same as when zapping, we can
+ * still fall back to a normal PTE table and handle it without
+ * traversing the entries to do the de-accounting.
+ */
+ if (test_bit(MMF_COW_PTE, &tlb->mm->flags)) {
+ if (!pmd_none(*pmd) && !pmd_write(*pmd)) {
+ spinlock_t *ptl = pte_lockptr(tlb->mm, pmd);
+
+ spin_lock(ptl);
+ if (!pmd_put_pte(pmd)) {
+ pmd_t new = pmd_mkwrite(*pmd);
+
+ set_pmd_at(tlb->mm, addr, pmd, new);
+ spin_unlock(ptl);
+ free_pte_range(tlb, pmd, addr);
+ continue;
+ }
+ spin_unlock(ptl);
+
+ pmd_clear(pmd);
+ mm_dec_nr_ptes(tlb->mm);
+ flush_tlb_mm_range(tlb->mm, addr, next,
+ PAGE_SHIFT, false);
+ } else
+ VM_WARN_ON(cow_pte_count(pmd) != 1);
+ }
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
@@ -1676,12 +1705,34 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
+ bool pte_is_shared = false;
+
+ if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
+ if (!range_in_vma(vma, addr & PMD_MASK,
+ (addr + PMD_SIZE) & PMD_MASK)) {
+ /*
+ * We cannot guarantee that this COW-ed PTE table will also be
+ * zapped with the rest of the VMAs, so break COW PTE here.
+ */
+ break_cow_pte(vma, pmd, addr);
+ } else {
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (cow_pte_count(pmd) == 1) {
+ /* Reuse COW-ed PTE */
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(tlb->mm, addr, pmd, new);
+ } else
+ pte_is_shared = true;
+ pte_unmap_unlock(start_pte, ptl);
+ }
+ }

tlb_change_page_size(tlb, PAGE_SIZE);
again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
+
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
@@ -1698,11 +1749,15 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
page = vm_normal_page(vma, addr, ptent);
if (unlikely(!should_zap_page(details, page)))
continue;
- ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
+ if (pte_is_shared)
+ ptent = *pte;
+ else
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details,
- ptent);
+ if (!pte_is_shared)
+ zap_install_uffd_wp_if_needed(vma, addr, pte,
+ details, ptent);
if (unlikely(!page))
continue;

@@ -1768,8 +1823,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
/* We should have covered all the swap entry types */
WARN_ON_ONCE(1);
}
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+
+ if (!pte_is_shared) {
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ zap_install_uffd_wp_if_needed(vma, addr, pte,
+ details, ptent);
+ }
} while (pte++, addr += PAGE_SIZE, addr != end);

add_mm_rss_vec(mm, rss);
@@ -2147,6 +2206,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
if (retval)
goto out;
retval = -ENOMEM;
+ if (break_cow_pte(vma, NULL, addr) < 0)
+ goto out;
pte = get_locked_pte(vma->vm_mm, addr, &ptl);
if (!pte)
goto out;
@@ -2406,6 +2467,9 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
pte_t *pte, entry;
spinlock_t *ptl;

+ if (break_cow_pte(vma, NULL, addr) < 0)
+ return VM_FAULT_OOM;
+
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
return VM_FAULT_OOM;
@@ -2783,6 +2847,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
+
+ if (break_cow_pte_range(vma, addr, end))
+ return -ENOMEM;
+
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
@@ -5143,6 +5211,226 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
return VM_FAULT_FALLBACK;
}

+/* Break (unshare) COW PTE */
+static vm_fault_t handle_cow_pte_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t *pmd = vmf->pmd;
+ unsigned long start, end, addr = vmf->address;
+ struct mmu_notifier_range range;
+ pmd_t cowed_entry;
+ pte_t *orig_dst_pte, *orig_src_pte;
+ pte_t *dst_pte, *src_pte;
+ spinlock_t *dst_ptl, *src_ptl;
+ int ret = 0;
+
+ /*
+ * Do nothing for a fault that does not have a PTE table yet
+ * (from lazy fork).
+ */
+ if (pmd_none(*pmd) || pmd_write(*pmd))
+ return 0;
+ /* COW PTE does not handle huge pages. */
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ return 0;
+
+ mmap_assert_write_locked(mm);
+
+ start = addr & PMD_MASK;
+ end = (addr + PMD_SIZE) & PMD_MASK;
+ addr = start;
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+ 0, vma, mm, start, end);
+ /*
+ * Because the address range covers the whole PTE table and not
+ * only the faulted vma, there might be some mismatch since the
+ * mmu notifier is only registered for the faulted vma.
+ * Do we really need to care about this kind of mismatch?
+ */
+ mmu_notifier_invalidate_range_start(&range);
+ raw_write_seqcount_begin(&mm->write_protect_seq);
+
+ /*
+ * Fast path: if the faulting task is the only one referencing
+ * this COW-ed PTE table, reuse it.
+ */
+ src_pte = pte_offset_map_lock(mm, pmd, addr, &src_ptl);
+ if (cow_pte_count(pmd) == 1) {
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(mm, addr, pmd, new);
+ pte_unmap_unlock(src_pte, src_ptl);
+ goto flush_tlb;
+ }
+ pte_unmap_unlock(src_pte, src_ptl);
+
+ /*
+ * Slow path. Since the accounting has already been done and the
+ * mapped pages are still shared, we can simply clone the PTE table.
+ */
+
+ cowed_entry = READ_ONCE(*pmd);
+ /* Decrease the pgtable_bytes of COW-ed PTE. */
+ mm_dec_nr_ptes(mm);
+ pmd_clear(pmd);
+ orig_dst_pte = dst_pte = pte_alloc_map_lock(mm, pmd, addr, &dst_ptl);
+ if (unlikely(!dst_pte)) {
+ /* If allocation failed, restore COW-ed PTE. */
+ set_pmd_at(mm, addr, pmd, cowed_entry);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Hold the lock of the COW-ed PTE table until all the operations
+ * have been done, including duplicating the entries, flushing the
+ * TLB, and decreasing the refcount.
+ */
+ src_pte = pte_offset_map_lock(mm, &cowed_entry, addr, &src_ptl);
+ orig_src_pte = src_pte;
+ arch_enter_lazy_mmu_mode();
+
+ do {
+ if (pte_none(*src_pte))
+ continue;
+ /*
+ * Most of the cases should have been handled in
+ * copy_cow_pte_range(). But we cannot distinguish whether the vma
+ * belongs to the parent or the child, so take care of it here.
+ */
+ set_pte_at(mm, addr, dst_pte, *src_pte);
+ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(orig_dst_pte, dst_ptl);
+
+ /* Decrease the refcount of COW-ed PTE. */
+ if (!pmd_put_pte(&cowed_entry)) {
+ /* COW-ed (old) PTE's refcount is 1, reuse it. */
+ pgtable_t token = pmd_pgtable(*pmd);
+ /* Reuse COW-ed PTE. */
+ pmd_t new = pmd_mkwrite(cowed_entry);
+
+ /* Clear all the entries of new PTE. */
+ addr = start;
+ dst_pte = pte_offset_map_lock(mm, pmd, addr, &dst_ptl);
+ orig_dst_pte = dst_pte;
+ do {
+ if (pte_none(*dst_pte))
+ continue;
+ if (pte_present(*dst_pte))
+ page_table_check_pte_clear(mm, addr, *dst_pte);
+ pte_clear(mm, addr, dst_pte);
+ } while (dst_pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(orig_dst_pte, dst_ptl);
+ /* Now, we can safely free new PTE. */
+ pmd_clear(pmd);
+ pte_free(mm, token);
+ /* Reuse COW-ed PTE */
+ set_pmd_at(mm, start, pmd, new);
+ }
+
+ pte_unmap_unlock(orig_src_pte, src_ptl);
+
+flush_tlb:
+ /*
+ * Since we changed the write protection, flush the TLB.
+ * flush_tlb_range() only uses the vma to get the mm, so we do not
+ * need to worry about the address range not matching the vma here.
+ */
+ flush_tlb_range(vma, start, end);
+out:
+ raw_write_seqcount_end(&mm->write_protect_seq);
+ mmu_notifier_invalidate_range_end(&range);
+
+ return ret;
+}
+
+static inline int __break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr)
+{
+ struct vm_fault vmf = {
+ .vma = vma,
+ .address = addr & PAGE_MASK,
+ .pmd = pmd,
+ };
+
+ return handle_cow_pte_fault(&vmf);
+}
+
+/**
+ * break_cow_pte - duplicate or reuse a shared, write-protected (COW-ed) PTE table
+ * @vma: target vma to break COW on
+ * @pmd: pmd entry that maps the shared PTE table
+ * @addr: the address that triggered the break COW PTE
+ *
+ * The address needs to be in the range of the shared and write-protected
+ * PTE table that the pmd entry maps. If pmd is NULL, look it up from the
+ * vma. Duplicate the COW-ed PTE table when others still map to it.
+ * Otherwise, reuse the COW-ed PTE table.
+ */
+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr)
+{
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+
+ if (!vma)
+ return -EINVAL;
+ mm = vma->vm_mm;
+
+ if (!test_bit(MMF_COW_PTE, &mm->flags))
+ return 0;
+
+ if (!pmd) {
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, addr);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+ pmd = pmd_offset(pud, addr);
+ }
+
+ /* We will check the type of pmd entry later. */
+
+ return __break_cow_pte(vma, pmd, addr);
+}
+
+/**
+ * break_cow_pte_range - duplicate/reuse COW-ed PTE tables in a given range
+ * @vma: target vma to break COW on
+ * @start: start address of the range
+ * @end: end address of the range
+ *
+ * Return: zero on success, otherwise the number of PMD ranges that failed.
+ */
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long addr, next;
+ int nr_failed = 0;
+
+ if (!vma)
+ return -EINVAL;
+ if (!range_in_vma(vma, start, end))
+ return -EINVAL;
+
+ addr = start;
+ do {
+ next = pmd_addr_end(addr, end);
+ if (break_cow_pte(vma, NULL, addr) < 0)
+ nr_failed++;
+ } while (addr = next, addr != end);
+
+ return nr_failed;
+}
+
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
@@ -5355,8 +5643,27 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return 0;
}
}
+ /*
+ * Duplicate the COW-ed PTE table when the page fault will modify
+ * the mapped pages (write or unshare fault) or the PTE table
+ * itself (file-mapped read fault, see do_read_fault()).
+ */
+ if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE) ||
+ vma->vm_ops) && test_bit(MMF_COW_PTE, &mm->flags)) {
+ ret = handle_cow_pte_fault(&vmf);
+ if (unlikely(ret == -ENOMEM))
+ return VM_FAULT_OOM;
+ }
}

+ /*
+ * It will definitely break the kernel if the refcount of the PTE
+ * table is higher than 1 while the PMD entry is writable. But we
+ * want to see more information, so just warn here.
+ */
+ if (likely(!pmd_none(*vmf.pmd)))
+ VM_WARN_ON(cow_pte_count(vmf.pmd) > 1 && pmd_write(*vmf.pmd));
+
return handle_pte_fault(&vmf);
}

diff --git a/mm/mmap.c b/mm/mmap.c
index 74a84eb33b904..3eb9b852adc3b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2208,6 +2208,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return err;
}

+ err = break_cow_pte(vma, NULL, addr);
+ if (err)
+ return err;
+
new = vm_area_dup(vma);
if (!new)
return -ENOMEM;
diff --git a/mm/mremap.c b/mm/mremap.c
index e465ffe279bb0..b4136b12f24b6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -534,6 +534,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
old_pmd = get_old_pmd(vma->vm_mm, old_addr);
if (!old_pmd)
continue;
+ /* Does this end up flushing the TLB twice? */
+ break_cow_pte(vma, old_pmd, old_addr);
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
if (!new_pmd)
break;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 72e481aacd5df..10af3e0a2eb5d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1911,6 +1911,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
next = pmd_addr_end(addr, end);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
+ if (break_cow_pte(vma, pmd, addr) < 0)
+ return -ENOMEM;
ret = unuse_pte_range(vma, pmd, addr, next, type);
if (ret)
return ret;
--
2.37.3


2022-12-20 12:22:16

by kernel test robot

Subject: Re: [PATCH v3 03/14] mm: Add break COW PTE fault and helper functions

Hi Chih-En,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on v6.1-rc7]
[also build test ERROR on next-20221220]
[cannot apply to akpm-mm/mm-everything tip/perf/core acme/perf/core linus/master v6.1 v6.1-rc8]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Chih-En-Lin/Introduce-Copy-On-Write-to-Page-Table/20221220-153207
patch link: https://lore.kernel.org/r/20221220072743.3039060-4-shiyn.lin%40gmail.com
patch subject: [PATCH v3 03/14] mm: Add break COW PTE fault and helper functions
config: powerpc-allnoconfig
compiler: powerpc-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/18accecf2701f7705ada53938cc2005fa15cc063
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Chih-En-Lin/Introduce-Copy-On-Write-to-Page-Table/20221220-153207
git checkout 18accecf2701f7705ada53938cc2005fa15cc063
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=powerpc olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=powerpc SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>

All errors (new ones prefixed by >>):

mm/memory.c: In function 'free_pmd_range':
>> mm/memory.c:255:53: error: implicit declaration of function 'pmd_mkwrite'; did you mean 'pmd_write'? [-Werror=implicit-function-declaration]
255 | pmd_t new = pmd_mkwrite(*pmd);
| ^~~~~~~~~~~
| pmd_write
>> mm/memory.c:255:53: error: invalid initializer
mm/memory.c:257:41: error: implicit declaration of function 'set_pmd_at'; did you mean 'set_pte_at'? [-Werror=implicit-function-declaration]
257 | set_pmd_at(tlb->mm, addr, pmd, new);
| ^~~~~~~~~~
| set_pte_at
>> mm/memory.c:266:33: error: implicit declaration of function 'flush_tlb_mm_range'; did you mean 'flush_tlb_range'? [-Werror=implicit-function-declaration]
266 | flush_tlb_mm_range(tlb->mm, addr, next,
| ^~~~~~~~~~~~~~~~~~
| flush_tlb_range
In file included from arch/powerpc/include/asm/page.h:331,
from arch/powerpc/include/asm/thread_info.h:13,
from include/linux/thread_info.h:60,
from arch/powerpc/include/asm/ptrace.h:328,
from arch/powerpc/include/asm/hw_irq.h:12,
from arch/powerpc/include/asm/irqflags.h:12,
from include/linux/irqflags.h:16,
from include/asm-generic/cmpxchg-local.h:6,
from arch/powerpc/include/asm/cmpxchg.h:526,
from arch/powerpc/include/asm/atomic.h:11,
from include/linux/atomic.h:7,
from include/linux/cpumask.h:13,
from include/linux/smp.h:13,
from include/linux/kernel_stat.h:5,
from mm/memory.c:42:
mm/memory.c: In function 'copy_cow_pte_range':
include/asm-generic/memory_model.h:18:33: error: initialization of 'pgtable_t' {aka 'long unsigned int *'} from incompatible pointer type 'struct page *' [-Werror=incompatible-pointer-types]
18 | #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
| ^
include/asm-generic/memory_model.h:53:21: note: in expansion of macro '__pfn_to_page'
53 | #define pfn_to_page __pfn_to_page
| ^~~~~~~~~~~~~
arch/powerpc/include/asm/book3s/32/pgtable.h:363:33: note: in expansion of macro 'pfn_to_page'
363 | #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
| ^~~~~~~~~~~
mm/memory.c:1217:31: note: in expansion of macro 'pmd_page'
1217 | pgtable_t pte_table = pmd_page(*src_pmd);
| ^~~~~~~~
mm/memory.c:1329:48: error: implicit declaration of function 'pmd_wrprotect'; did you mean 'pte_wrprotect'? [-Werror=implicit-function-declaration]
1329 | set_pmd_at(dst_mm, orig_addr, dst_pmd, pmd_wrprotect(*src_pmd));
| ^~~~~~~~~~~~~
| pte_wrprotect
mm/memory.c: In function 'zap_pte_range':
mm/memory.c:1722:45: error: invalid initializer
1722 | pmd_t new = pmd_mkwrite(*pmd);
| ^~~~~~~~~~~
mm/memory.c: In function 'handle_cow_pte_fault':
mm/memory.c:5261:29: error: invalid initializer
5261 | pmd_t new = pmd_mkwrite(*pmd);
| ^~~~~~~~~~~
mm/memory.c:5313:29: error: invalid initializer
5313 | pmd_t new = pmd_mkwrite(cowed_entry);
| ^~~~~~~~~~~
cc1: some warnings being treated as errors


vim +255 mm/memory.c

229
230 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
231 unsigned long addr, unsigned long end,
232 unsigned long floor, unsigned long ceiling)
233 {
234 pmd_t *pmd;
235 unsigned long next;
236 unsigned long start;
237
238 start = addr;
239 pmd = pmd_offset(pud, addr);
240 do {
241 next = pmd_addr_end(addr, end);
242 /*
243 * For COW-ed PTE, the pte entries still mapping to pages.
244 * However, we should did de-accounting to all of it. So,
245 * even if the refcount is not the same as zapping, we
246 * could still fall back to normal PTE and handle it
247 * without traversing entries to do the de-accounting.
248 */
249 if (test_bit(MMF_COW_PTE, &tlb->mm->flags)) {
250 if (!pmd_none(*pmd) && !pmd_write(*pmd)) {
251 spinlock_t *ptl = pte_lockptr(tlb->mm, pmd);
252
253 spin_lock(ptl);
254 if (!pmd_put_pte(pmd)) {
> 255 pmd_t new = pmd_mkwrite(*pmd);
256
257 set_pmd_at(tlb->mm, addr, pmd, new);
258 spin_unlock(ptl);
259 free_pte_range(tlb, pmd, addr);
260 continue;
261 }
262 spin_unlock(ptl);
263
264 pmd_clear(pmd);
265 mm_dec_nr_ptes(tlb->mm);
> 266 flush_tlb_mm_range(tlb->mm, addr, next,
267 PAGE_SHIFT, false);
268 } else
269 VM_WARN_ON(cow_pte_count(pmd) != 1);
270 }
271 if (pmd_none_or_clear_bad(pmd))
272 continue;
273 free_pte_range(tlb, pmd, addr);
274 } while (pmd++, addr = next, addr != end);
275
276 start &= PUD_MASK;
277 if (start < floor)
278 return;
279 if (ceiling) {
280 ceiling &= PUD_MASK;
281 if (!ceiling)
282 return;
283 }
284 if (end - 1 > ceiling - 1)
285 return;
286
287 pmd = pmd_offset(pud, start);
288 pud_clear(pud);
289 pmd_free_tlb(tlb, pmd, start);
290 mm_dec_nr_pmds(tlb->mm);
291 }
292

--
0-DAY CI Kernel Test Service
https://01.org/lkp

