2023-06-29 21:00:27

by Axel Rasmussen

Subject: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

The basic idea here is to "simulate" memory poisoning for VMs. A VM
running on some host might encounter a memory error, after which some
page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
once poisoned, pages can never become "un-poisoned". So, when we live
migrate the VM, we need to preserve the poisoned status of these pages.

When live migrating, we try to get the guest running on its new host as
quickly as possible. So, we start it running before all memory has been
copied, and before we're certain which pages should be poisoned or not.

So the basic way to use this new feature is:

- On the new host, the guest's memory is registered with userfaultfd, in
either MISSING or MINOR mode (doesn't really matter for this purpose).
- On any first access, we get a userfaultfd event. At this point we can
communicate with the old host to find out if the page was poisoned.
- If so, we can respond with a UFFDIO_POISON - this places a swap marker
so any future accesses will SIGBUS. Because the pte is now "present",
future accesses won't generate more userfaultfd events, they'll just
SIGBUS directly.
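
As a rough illustration of that last step, here is a minimal userspace
sketch (not part of this patch) of resolving such a fault. It assumes a
monitor thread that has already read a struct uffd_msg from the
userfaultfd and has learned, through some out-of-band channel to the old
host, that the page was poisoned there; the helper name and the error
handling are illustrative only:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int resolve_poisoned_fault(int uffd, const struct uffd_msg *msg,
                                  unsigned long page_size)
{
        struct uffdio_poison req = { 0 };

        req.range.start = msg->arg.pagefault.address &
                          ~(__u64)(page_size - 1);
        req.range.len = page_size;
        req.mode = 0;   /* 0 == also wake any threads blocked on the fault */

        if (ioctl(uffd, UFFDIO_POISON, &req))
                return -1;
        /* On success, req.updated reports how many bytes were handled. */
        return req.updated == (__s64)page_size ? 0 : -1;
}

Any later access to that page then takes the VM_FAULT_HWPOISON path
described below, without generating further userfaultfd events.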

UFFDIO_POISON does not handle unmapping previously-present PTEs. This
isn't needed, because during live migration we want to intercept
all accesses with userfaultfd (not just writes, so WP mode isn't useful
for this). So whether minor or missing mode is being used (or both), the
PTE won't be present in any case, so handling that case isn't needed.

Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
these markers is encountered? For "normal" userspace programs there
isn't a big difference, both yield a SIGBUS. The difference for KVM is
key though: VM_FAULT_HWPOISON will result in an MCE being injected into
the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
hypervisor would need to catch the SIGBUS and deal with the MCE
injection itself.

Signed-off-by: Axel Rasmussen <[email protected]>
---
fs/userfaultfd.c | 63 ++++++++++++++++++++++++++++++++
include/linux/swapops.h | 3 +-
include/linux/userfaultfd_k.h | 4 ++
include/uapi/linux/userfaultfd.h | 25 +++++++++++--
mm/memory.c | 4 ++
mm/userfaultfd.c | 62 ++++++++++++++++++++++++++++++-
6 files changed, 156 insertions(+), 5 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 7cecd49e078b..c26a883399c9 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1965,6 +1965,66 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
return ret;
}

+static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
+{
+ __s64 ret;
+ struct uffdio_poison uffdio_poison;
+ struct uffdio_poison __user *user_uffdio_poison;
+ struct userfaultfd_wake_range range;
+
+ user_uffdio_poison = (struct uffdio_poison __user *)arg;
+
+ ret = -EAGAIN;
+ if (atomic_read(&ctx->mmap_changing))
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(&uffdio_poison, user_uffdio_poison,
+ /* don't copy the output fields */
+ sizeof(uffdio_poison) - (sizeof(__s64))))
+ goto out;
+
+ ret = validate_range(ctx->mm, uffdio_poison.range.start,
+ uffdio_poison.range.len);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ /* double check for wraparound just in case. */
+ if (uffdio_poison.range.start + uffdio_poison.range.len <=
+ uffdio_poison.range.start) {
+ goto out;
+ }
+ if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
+ goto out;
+
+ if (mmget_not_zero(ctx->mm)) {
+ ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
+ uffdio_poison.range.len,
+ &ctx->mmap_changing, 0);
+ mmput(ctx->mm);
+ } else {
+ return -ESRCH;
+ }
+
+ if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
+ return -EFAULT;
+ if (ret < 0)
+ goto out;
+
+ /* len == 0 would wake all */
+ BUG_ON(!ret);
+ range.len = ret;
+ if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
+ range.start = uffdio_poison.range.start;
+ wake_userfault(ctx, &range);
+ }
+ ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
+
+out:
+ return ret;
+}
+
static inline unsigned int uffd_ctx_features(__u64 user_features)
{
/*
@@ -2066,6 +2126,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
case UFFDIO_CONTINUE:
ret = userfaultfd_continue(ctx, arg);
break;
+ case UFFDIO_POISON:
+ ret = userfaultfd_poison(ctx, arg);
+ break;
}
return ret;
}
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 4c932cb45e0b..8259fee32421 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -394,7 +394,8 @@ typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP BIT(0)
#define PTE_MARKER_SWAPIN_ERROR BIT(1)
-#define PTE_MARKER_MASK (BIT(2) - 1)
+#define PTE_MARKER_UFFD_POISON BIT(2)
+#define PTE_MARKER_MASK (BIT(3) - 1)

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac7b0c96d351..ac8c6854097c 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -46,6 +46,7 @@ enum mfill_atomic_mode {
MFILL_ATOMIC_COPY,
MFILL_ATOMIC_ZEROPAGE,
MFILL_ATOMIC_CONTINUE,
+ MFILL_ATOMIC_POISON,
NR_MFILL_ATOMIC_MODES,
};

@@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long len, atomic_t *mmap_changing,
uffd_flags_t flags);
+extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, atomic_t *mmap_changing,
+ uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 66dd4cd277bd..62151706c5a3 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -39,7 +39,8 @@
UFFD_FEATURE_MINOR_SHMEM | \
UFFD_FEATURE_EXACT_ADDRESS | \
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
- UFFD_FEATURE_WP_UNPOPULATED)
+ UFFD_FEATURE_WP_UNPOPULATED | \
+ UFFD_FEATURE_POISON)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -49,12 +50,14 @@
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
- (__u64)1 << _UFFDIO_CONTINUE)
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_POISON)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
- (__u64)1 << _UFFDIO_WRITEPROTECT)
+ (__u64)1 << _UFFDIO_POISON)

/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -71,6 +74,7 @@
#define _UFFDIO_ZEROPAGE (0x04)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
+#define _UFFDIO_POISON (0x08)
#define _UFFDIO_API (0x3F)

/* userfaultfd ioctl ids */
@@ -91,6 +95,8 @@
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
struct uffdio_continue)
+#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
+ struct uffdio_poison)

/* read() structure */
struct uffd_msg {
@@ -225,6 +231,7 @@ struct uffdio_api {
#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
+#define UFFD_FEATURE_POISON (1<<14)
__u64 features;

__u64 ioctls;
@@ -321,6 +328,18 @@ struct uffdio_continue {
__s64 mapped;
};

+struct uffdio_poison {
+ struct uffdio_range range;
+#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 updated;
+};
+
/*
* Flags for the userfaultfd(2) system call itself.
*/
diff --git a/mm/memory.c b/mm/memory.c
index d8a9a770b1f1..7fbda39e060d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3692,6 +3692,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
if (WARN_ON_ONCE(!marker))
return VM_FAULT_SIGBUS;

+ /* Poison emulation explicitly requested for this PTE. */
+ if (marker & PTE_MARKER_UFFD_POISON)
+ return VM_FAULT_HWPOISON;
+
/* Higher priority than uffd-wp when data corrupted */
if (marker & PTE_MARKER_SWAPIN_ERROR)
return VM_FAULT_SIGBUS;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index a2bf37ee276d..87b62ca1e09e 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -286,6 +286,51 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
goto out;
}

+/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
+static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ uffd_flags_t flags)
+{
+ int ret;
+ struct mm_struct *dst_mm = dst_vma->vm_mm;
+ pte_t _dst_pte, *dst_pte;
+ spinlock_t *ptl;
+
+ _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
+ dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+ if (vma_is_shmem(dst_vma)) {
+ struct inode *inode;
+ pgoff_t offset, max_off;
+
+ /* serialize against truncate with the page table lock */
+ inode = dst_vma->vm_file->f_inode;
+ offset = linear_page_index(dst_vma, dst_addr);
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ ret = -EFAULT;
+ if (unlikely(offset >= max_off))
+ goto out_unlock;
+ }
+
+ ret = -EEXIST;
+ /*
+ * For now, we don't handle unmapping pages, so only support filling in
+ * none PTEs, or replacing PTE markers.
+ */
+ if (!pte_none_mostly(*dst_pte))
+ goto out_unlock;
+
+ set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(dst_vma, dst_addr, dst_pte);
+ ret = 0;
+out_unlock:
+ pte_unmap_unlock(dst_pte, ptl);
+ return ret;
+}
+
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
@@ -336,8 +381,12 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
* supported by hugetlb. A PMD_SIZE huge pages may exist as used
* by THP. Since we can not reliably insert a zero page, this
* feature is not supported.
+ *
+ * PTE marker handling for hugetlb is a bit special, so for now
+ * UFFDIO_POISON is not supported.
*/
- if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
+ uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
mmap_read_unlock(dst_mm);
return -EINVAL;
}
@@ -481,6 +530,9 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
return mfill_atomic_pte_continue(dst_pmd, dst_vma,
dst_addr, flags);
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ return mfill_atomic_pte_poison(dst_pmd, dst_vma,
+ dst_addr, flags);
}

/*
@@ -702,6 +754,14 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

+ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, atomic_t *mmap_changing,
+ uffd_flags_t flags)
+{
+ return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
+ uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
+}
+
long uffd_wp_range(struct vm_area_struct *dst_vma,
unsigned long start, unsigned long len, bool enable_wp)
{
--
2.41.0.255.g8b1d071c50-goog



2023-06-29 21:01:20

by Axel Rasmussen

Subject: [PATCH v2 3/6] mm: userfaultfd: support UFFDIO_POISON for hugetlbfs

The behavior here is the same as it is for anon/shmem. This is done
separately because hugetlb pte marker handling is a bit different.

Signed-off-by: Axel Rasmussen <[email protected]>
---
mm/hugetlb.c | 33 +++++++++++++++++++++++++++++++--
mm/userfaultfd.c | 6 +-----
2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 38711d49e4db..05abe88986b6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6090,14 +6090,24 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}

entry = huge_ptep_get(ptep);
- /* PTE markers should be handled the same way as none pte */
- if (huge_pte_none_mostly(entry))
+ if (huge_pte_none_mostly(entry)) {
+ if (is_pte_marker(entry)) {
+ unsigned long marker = pte_marker_get(pte_to_swp_entry(entry));
+
+ if (marker & PTE_MARKER_UFFD_POISON) {
+ ret = VM_FAULT_HWPOISON_LARGE;
+ goto out_mutex;
+ }
+ }
/*
+ * Other PTE markers should be handled the same way as none PTE.
+ *
* hugetlb_no_page will drop vma lock and hugetlb fault
* mutex internally, which make us return immediately.
*/
return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
entry, flags);
+ }

ret = 0;

@@ -6253,6 +6263,25 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
int writable;
bool folio_in_pagecache = false;

+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ ptl = huge_pte_lock(h, dst_mm, dst_pte);
+
+ /* Don't overwrite any existing PTEs (even markers) */
+ if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+ spin_unlock(ptl);
+ return -EEXIST;
+ }
+
+ _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
+ set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+ spin_unlock(ptl);
+ return 0;
+ }
+
if (is_continue) {
ret = -EFAULT;
folio = filemap_lock_folio(mapping, idx);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 87b62ca1e09e..4436cae1c7a8 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -381,12 +381,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
* supported by hugetlb. A PMD_SIZE huge pages may exist as used
* by THP. Since we can not reliably insert a zero page, this
* feature is not supported.
- *
- * PTE marker handling for hugetlb is a bit special, so for now
- * UFFDIO_POISON is not supported.
*/
- if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
- uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
mmap_read_unlock(dst_mm);
return -EINVAL;
}
--
2.41.0.255.g8b1d071c50-goog


2023-06-29 21:02:31

by Axel Rasmussen

Subject: [PATCH v2 2/6] mm: userfaultfd: refactor hugetlb folio allocation / lookup code

At the top of `hugetlb_mfill_atomic_pte`, we need to get the folio we're
going to be mapping. There are three basic cases we're dealing with
here:

1. We're doing a UFFDIO_CONTINUE, in which case we lookup an existing
folio in the pagecache, instead of allocating a new one.
2. We need to allocate a new folio.
3. We previously failed while populating our new folio, so we "returned"
a temporary folio using `foliop` and had our caller retry.

In a future commit I'm going to add a fourth case for UFFDIO_POISON,
where we aren't going to map a folio at all (newly allocated or
otherwise). This end state will be simpler, and we can re-use a bit more
code, if we stop using `if (...)` to distinguish the cases.

So, refactor the cases so they share most of the same code, and instead
switch to `goto` to skip some parts depending on the case at hand.

Signed-off-by: Axel Rasmussen <[email protected]>
---
mm/hugetlb.c | 53 +++++++++++++++++++++++++---------------------------
1 file changed, 25 insertions(+), 28 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bce28cca73a1..38711d49e4db 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6259,22 +6259,32 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
if (IS_ERR(folio))
goto out;
folio_in_pagecache = true;
- } else if (!*foliop) {
- /* If a folio already exists, then it's UFFDIO_COPY for
- * a non-missing case. Return -EEXIST.
- */
- if (vm_shared &&
- hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
- ret = -EEXIST;
- goto out;
+ goto ready;
+ }
+
+ /* If a folio already exists, then it's UFFDIO_COPY for
+ * a non-missing case. Return -EEXIST.
+ */
+ if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+ ret = -EEXIST;
+ if (*foliop) {
+ folio_put(*foliop);
+ *foliop = NULL;
}
+ goto out;
+ }

- folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
- if (IS_ERR(folio)) {
- ret = -ENOMEM;
- goto out;
+ folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+ if (IS_ERR(folio)) {
+ ret = -ENOMEM;
+ if (*foliop) {
+ folio_put(*foliop);
+ *foliop = NULL;
}
+ goto out;
+ }

+ if (!*foliop) {
ret = copy_folio_from_user(folio, (const void __user *) src_addr,
false);

@@ -6302,22 +6312,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
*/
goto out;
}
- } else {
- if (vm_shared &&
- hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
- folio_put(*foliop);
- ret = -EEXIST;
- *foliop = NULL;
- goto out;
- }
-
- folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
- if (IS_ERR(folio)) {
- folio_put(*foliop);
- ret = -ENOMEM;
- *foliop = NULL;
- goto out;
- }
+ } else { /* Caller retried because we set *foliop previously */
ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
folio_put(*foliop);
*foliop = NULL;
@@ -6327,6 +6322,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
}
}

+ready: /* `folio` ready to map (non-NULL, populated) */
+
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
* preceding stores to the page contents become visible before
--
2.41.0.255.g8b1d071c50-goog


2023-06-29 21:06:06

by Axel Rasmussen

Subject: [PATCH v2 4/6] selftests/mm: refactor uffd_poll_thread to allow custom fault handlers

Previously, we had "one fault handler to rule them all", which used
several branches to deal with all of the scenarios required by all of
the various tests.

In upcoming patches, I plan to add a new test, which has its own
slightly different fault handling logic. Instead of continuing to add
cruft to the existing fault handler, let's allow tests to define custom
ones, separate from other tests.
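
For instance, the UFFDIO_POISON test added later in this series plugs in
its own handler before spawning the poll thread (condensed from that
patch):

        struct uffd_args args = { 0 };

        args.handle_fault = uffd_poison_handle_fault; /* test-specific handler */
        if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
                err("uffd_poll_thread create");

Tests that leave handle_fault NULL keep the existing behavior, since
uffd_poll_thread falls back to uffd_handle_page_fault.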

Signed-off-by: Axel Rasmussen <[email protected]>
---
tools/testing/selftests/mm/uffd-common.c | 5 ++++-
tools/testing/selftests/mm/uffd-common.h | 3 +++
tools/testing/selftests/mm/uffd-stress.c | 12 +++++++-----
3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index ba20d7504022..02b89860e193 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -499,6 +499,9 @@ void *uffd_poll_thread(void *arg)
int ret;
char tmp_chr;

+ if (!args->handle_fault)
+ args->handle_fault = uffd_handle_page_fault;
+
pollfd[0].fd = uffd;
pollfd[0].events = POLLIN;
pollfd[1].fd = pipefd[cpu*2];
@@ -527,7 +530,7 @@ void *uffd_poll_thread(void *arg)
err("unexpected msg event %u\n", msg.event);
break;
case UFFD_EVENT_PAGEFAULT:
- uffd_handle_page_fault(&msg, args);
+ args->handle_fault(&msg, args);
break;
case UFFD_EVENT_FORK:
close(uffd);
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index 197f5262fe0d..7c4fa964c3b0 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -77,6 +77,9 @@ struct uffd_args {
unsigned long missing_faults;
unsigned long wp_faults;
unsigned long minor_faults;
+
+ /* A custom fault handler; defaults to uffd_handle_page_fault. */
+ void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args);
};

struct uffd_test_ops {
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index 995ff13e74c7..50b1224d72c7 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -189,10 +189,8 @@ static int stress(struct uffd_args *args)
locking_thread, (void *)cpu))
return 1;
if (bounces & BOUNCE_POLL) {
- if (pthread_create(&uffd_threads[cpu], &attr,
- uffd_poll_thread,
- (void *)&args[cpu]))
- return 1;
+ if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
+ err("uffd_poll_thread create");
} else {
if (pthread_create(&uffd_threads[cpu], &attr,
uffd_read_thread,
@@ -247,9 +245,13 @@ static int userfaultfd_stress(void)
{
void *area;
unsigned long nr;
- struct uffd_args args[nr_cpus];
+ struct uffd_args *args;
uint64_t mem_size = nr_pages * page_size;

+ args = calloc(nr_cpus, sizeof(struct uffd_args));
+ if (!args)
+ err("allocating args array failed");
+
if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
err("context init failed");

--
2.41.0.255.g8b1d071c50-goog


2023-06-29 21:08:26

by Axel Rasmussen

Subject: [PATCH v2 6/6] mm: userfaultfd: add basic documentation for UFFDIO_POISON

Just describe the feature at a really basic level.

Signed-off-by: Axel Rasmussen <[email protected]>
---
Documentation/admin-guide/mm/userfaultfd.rst | 15 +++++++++++++++
1 file changed, 15 insertions(+)

diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 7c304e432205..b19053436369 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -244,6 +244,21 @@ write-protected (so future writes will also result in a WP fault). These ioctls
support a mode flag (``UFFDIO_COPY_MODE_WP`` or ``UFFDIO_CONTINUE_MODE_WP``
respectively) to configure the mapping this way.

+Memory Poisoning Emulation
+---------------------------
+
+In response to a fault (either missing or minor), an action userspace can
+take to "resolve" it is to issue a ``UFFDIO_POISON``. This will cause any
+future faulters to either get a SIGBUS, or in KVM's case the guest will
+receive an MCE as if there were hardware memory poisoning.
+
+This is used to emulate hardware memory poisoning. Imagine a VM running on a
+machine which experiences a real hardware memory error. Later, we live migrate
+the VM to another physical machine. Since we want the migration to be
+transparent to the guest, we want that same address range to act as if it was
+still poisoned, even though it's on a new physical host which ostentisbly
+doesn't have a memory error in the exact same spot.
+
QEMU/KVM
========

--
2.41.0.255.g8b1d071c50-goog


2023-06-29 21:21:07

by Axel Rasmussen

Subject: [PATCH v2 5/6] selftests/mm: add uffd unit test for UFFDIO_POISON

The test is pretty basic, and exercises UFFDIO_POISON straightforwardly.
We register a region with userfaultfd, in missing fault mode. For each
fault, we either UFFDIO_COPY a zeroed page (odd pages) or UFFDIO_POISON
(even pages). We do this mix to test "something like a real use case",
where guest memory would be some mix of poisoned and non-poisoned pages.

We read each page in the region, and assert that the odd pages are
zeroed as expected, and the even pages yield a SIGBUS as expected.

Why UFFDIO_COPY instead of UFFDIO_ZEROPAGE? Because hugetlb doesn't
support UFFDIO_ZEROPAGE, and we don't want to have special case code.

Signed-off-by: Axel Rasmussen <[email protected]>
---
tools/testing/selftests/mm/uffd-unit-tests.c | 117 +++++++++++++++++++
1 file changed, 117 insertions(+)

diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 04d91f144d1c..2709a34a39c5 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -951,6 +951,117 @@ static void uffd_zeropage_test(uffd_test_args_t *args)
uffd_test_pass();
}

+static void uffd_register_poison(int uffd, void *addr, uint64_t len)
+{
+ uint64_t ioctls = 0;
+ uint64_t expected = (1 << _UFFDIO_COPY) | (1 << _UFFDIO_POISON);
+
+ if (uffd_register_with_ioctls(uffd, addr, len, true,
+ false, false, &ioctls))
+ err("poison register fail");
+
+ if ((ioctls & expected) != expected)
+ err("registered area doesn't support COPY and POISON ioctls");
+}
+
+static void do_uffdio_poison(int uffd, unsigned long offset)
+{
+ struct uffdio_poison uffdio_poison = { 0 };
+ int ret;
+ __s64 res;
+
+ uffdio_poison.range.start = (unsigned long) area_dst + offset;
+ uffdio_poison.range.len = page_size;
+ uffdio_poison.mode = 0;
+ ret = ioctl(uffd, UFFDIO_POISON, &uffdio_poison);
+ res = uffdio_poison.updated;
+
+ if (ret)
+ err("UFFDIO_POISON error: %"PRId64, (int64_t)res);
+ else if (res != page_size)
+ err("UFFDIO_POISON unexpected size: %"PRId64, (int64_t)res);
+}
+
+static void uffd_poison_handle_fault(
+ struct uffd_msg *msg, struct uffd_args *args)
+{
+ unsigned long offset;
+
+ if (msg->event != UFFD_EVENT_PAGEFAULT)
+ err("unexpected msg event %u", msg->event);
+
+ if (msg->arg.pagefault.flags &
+ (UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR))
+ err("unexpected fault type %llu", msg->arg.pagefault.flags);
+
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
+ offset &= ~(page_size-1);
+
+ /* Odd pages -> copy zeroed page; even pages -> poison. */
+ if (offset & page_size)
+ copy_page(uffd, offset, false);
+ else
+ do_uffdio_poison(uffd, offset);
+}
+
+static void uffd_poison_test(uffd_test_args_t *targs)
+{
+ pthread_t uffd_mon;
+ char c;
+ struct uffd_args args = { 0 };
+ struct sigaction act = { 0 };
+ unsigned long nr_sigbus = 0;
+ unsigned long nr;
+
+ fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+
+ uffd_register_poison(uffd, area_dst, nr_pages * page_size);
+ memset(area_src, 0, nr_pages * page_size);
+
+ args.handle_fault = uffd_poison_handle_fault;
+ if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+ err("uffd_poll_thread create");
+
+ sigbuf = &jbuf;
+ act.sa_sigaction = sighndl;
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGBUS, &act, 0))
+ err("sigaction");
+
+ for (nr = 0; nr < nr_pages; ++nr) {
+ unsigned long offset = nr * page_size;
+ const char *bytes = (const char *) area_dst + offset;
+ const char *i;
+
+ if (sigsetjmp(*sigbuf, 1)) {
+ /*
+ * Access below triggered a SIGBUS, which was caught by
+ * sighndl, which then jumped here. Count this SIGBUS,
+ * and move on to next page.
+ */
+ ++nr_sigbus;
+ continue;
+ }
+
+ for (i = bytes; i < bytes + page_size; ++i) {
+ if (*i)
+ err("nonzero byte in area_dst (%p) at %p: %u",
+ area_dst, i, *i);
+ }
+ }
+
+ if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ err("pipe write");
+ if (pthread_join(uffd_mon, NULL))
+ err("pthread_join()");
+
+ if (nr_sigbus != nr_pages / 2)
+ err("expected to receive %lu SIGBUS, actually received %lu",
+ nr_pages / 2, nr_sigbus);
+
+ uffd_test_pass();
+}
+
/*
* Test the returned uffdio_register.ioctls with different register modes.
* Note that _UFFDIO_ZEROPAGE is tested separately in the zeropage test.
@@ -1126,6 +1237,12 @@ uffd_test_case_t uffd_tests[] = {
UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
+ {
+ .name = "poison",
+ .uffd_fn = uffd_poison_test,
+ .mem_targets = MEM_ALL,
+ .uffd_feature_required = UFFD_FEATURE_POISON,
+ },
};

static void usage(const char *prog)
--
2.41.0.255.g8b1d071c50-goog


2023-06-29 21:33:34

by Randy Dunlap

Subject: Re: [PATCH v2 6/6] mm: userfaultfd: add basic documentation for UFFDIO_POISON

typo below.

On 6/29/23 13:50, Axel Rasmussen wrote:
> Just describe the feature at a really basic level.
>
> Signed-off-by: Axel Rasmussen <[email protected]>
> ---
> Documentation/admin-guide/mm/userfaultfd.rst | 15 +++++++++++++++
> 1 file changed, 15 insertions(+)
>
> diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
> index 7c304e432205..b19053436369 100644
> --- a/Documentation/admin-guide/mm/userfaultfd.rst
> +++ b/Documentation/admin-guide/mm/userfaultfd.rst
> @@ -244,6 +244,21 @@ write-protected (so future writes will also result in a WP fault). These ioctls
> support a mode flag (``UFFDIO_COPY_MODE_WP`` or ``UFFDIO_CONTINUE_MODE_WP``
> respectively) to configure the mapping this way.
>
> +Memory Poisoning Emulation
> +---------------------------
> +
> +In response to a fault (either missing or minor), an action userspace can
> +take to "resolve" it is to issue a ``UFFDIO_POISON``. This will cause any
> +future faulters to either get a SIGBUS, or in KVM's case the guest will
> +receive an MCE as if there were hardware memory poisoning.
> +
> +This is used to emulate hardware memory poisoning. Imagine a VM running on a
> +machine which experiences a real hardware memory error. Later, we live migrate
> +the VM to another physical machine. Since we want the migration to be
> +transparent to the guest, we want that same address range to act as if it was
> +still poisoned, even though it's on a new physical host which ostentisbly

ostensibly

> +doesn't have a memory error in the exact same spot.
> +
> QEMU/KVM
> ========
>

--
~Randy

2023-07-04 20:45:50

by Peter Xu

Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Thu, Jun 29, 2023 at 01:50:35PM -0700, Axel Rasmussen wrote:
> The basic idea here is to "simulate" memory poisoning for VMs. A VM
> running on some host might encounter a memory error, after which some
> page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
> once poisoned, pages can never become "un-poisoned". So, when we live
> migrate the VM, we need to preserve the poisoned status of these pages.
>
> When live migrating, we try to get the guest running on its new host as
> quickly as possible. So, we start it running before all memory has been
> copied, and before we're certain which pages should be poisoned or not.
>
> So the basic way to use this new feature is:
>
> - On the new host, the guest's memory is registered with userfaultfd, in
> either MISSING or MINOR mode (doesn't really matter for this purpose).
> - On any first access, we get a userfaultfd event. At this point we can
> communicate with the old host to find out if the page was poisoned.
> - If so, we can respond with a UFFDIO_POISON - this places a swap marker
> so any future accesses will SIGBUS. Because the pte is now "present",
> future accesses won't generate more userfaultfd events, they'll just
> SIGBUS directly.
>
> UFFDIO_POISON does not handle unmapping previously-present PTEs. This
> isn't needed, because during live migration we want to intercept
> all accesses with userfaultfd (not just writes, so WP mode isn't useful
> for this). So whether minor or missing mode is being used (or both), the
> PTE won't be present in any case, so handling that case isn't needed.
>
> Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
> these markers is encountered? For "normal" userspace programs there
> isn't a big difference, both yield a SIGBUS. The difference for KVM is
> key though: VM_FAULT_HWPOISON will result in an MCE being injected into
> the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
> hypervisor would need to catch the SIGBUS and deal with the MCE
> injection itself.
>
> Signed-off-by: Axel Rasmussen <[email protected]>

Maybe have a cover letter for the next version?

> ---
> fs/userfaultfd.c | 63 ++++++++++++++++++++++++++++++++
> include/linux/swapops.h | 3 +-
> include/linux/userfaultfd_k.h | 4 ++
> include/uapi/linux/userfaultfd.h | 25 +++++++++++--
> mm/memory.c | 4 ++
> mm/userfaultfd.c | 62 ++++++++++++++++++++++++++++++-
> 6 files changed, 156 insertions(+), 5 deletions(-)
>
> diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> index 7cecd49e078b..c26a883399c9 100644
> --- a/fs/userfaultfd.c
> +++ b/fs/userfaultfd.c
> @@ -1965,6 +1965,66 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
> return ret;
> }
>
> +static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
> +{
> + __s64 ret;
> + struct uffdio_poison uffdio_poison;
> + struct uffdio_poison __user *user_uffdio_poison;
> + struct userfaultfd_wake_range range;
> +
> + user_uffdio_poison = (struct uffdio_poison __user *)arg;
> +
> + ret = -EAGAIN;
> + if (atomic_read(&ctx->mmap_changing))
> + goto out;
> +
> + ret = -EFAULT;
> + if (copy_from_user(&uffdio_poison, user_uffdio_poison,
> + /* don't copy the output fields */
> + sizeof(uffdio_poison) - (sizeof(__s64))))
> + goto out;
> +
> + ret = validate_range(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len);
> + if (ret)
> + goto out;
> +
> + ret = -EINVAL;
> + /* double check for wraparound just in case. */
> + if (uffdio_poison.range.start + uffdio_poison.range.len <=
> + uffdio_poison.range.start) {

Brackets not needed here.

> + goto out;
> + }
> + if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
> + goto out;
> +
> + if (mmget_not_zero(ctx->mm)) {
> + ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len,
> + &ctx->mmap_changing, 0);
> + mmput(ctx->mm);
> + } else {
> + return -ESRCH;
> + }
> +
> + if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
> + return -EFAULT;
> + if (ret < 0)
> + goto out;
> +
> + /* len == 0 would wake all */
> + BUG_ON(!ret);
> + range.len = ret;
> + if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
> + range.start = uffdio_poison.range.start;
> + wake_userfault(ctx, &range);
> + }
> + ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
> +
> +out:
> + return ret;
> +}
> +
> static inline unsigned int uffd_ctx_features(__u64 user_features)
> {
> /*
> @@ -2066,6 +2126,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
> case UFFDIO_CONTINUE:
> ret = userfaultfd_continue(ctx, arg);
> break;
> + case UFFDIO_POISON:
> + ret = userfaultfd_poison(ctx, arg);
> + break;
> }
> return ret;
> }
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 4c932cb45e0b..8259fee32421 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
>
> #define PTE_MARKER_UFFD_WP BIT(0)
> #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> -#define PTE_MARKER_MASK (BIT(2) - 1)
> +#define PTE_MARKER_UFFD_POISON BIT(2)

One more tab.

Though I remembered the last time we discussed IIRC we plan to rename
SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?

I think I commented this but I'll do it again: IIUC any existing host
swapin errors for guest pages should be reported as MCE too, afaict,
happened in kvm context.

> +#define PTE_MARKER_MASK (BIT(3) - 1)
>
> static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
> {
> diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> index ac7b0c96d351..ac8c6854097c 100644
> --- a/include/linux/userfaultfd_k.h
> +++ b/include/linux/userfaultfd_k.h
> @@ -46,6 +46,7 @@ enum mfill_atomic_mode {
> MFILL_ATOMIC_COPY,
> MFILL_ATOMIC_ZEROPAGE,
> MFILL_ATOMIC_CONTINUE,
> + MFILL_ATOMIC_POISON,
> NR_MFILL_ATOMIC_MODES,
> };
>
> @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
> extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
> unsigned long len, atomic_t *mmap_changing,
> uffd_flags_t flags);
> +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags);
> extern int mwriteprotect_range(struct mm_struct *dst_mm,
> unsigned long start, unsigned long len,
> bool enable_wp, atomic_t *mmap_changing);
> diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
> index 66dd4cd277bd..62151706c5a3 100644
> --- a/include/uapi/linux/userfaultfd.h
> +++ b/include/uapi/linux/userfaultfd.h
> @@ -39,7 +39,8 @@
> UFFD_FEATURE_MINOR_SHMEM | \
> UFFD_FEATURE_EXACT_ADDRESS | \
> UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
> - UFFD_FEATURE_WP_UNPOPULATED)
> + UFFD_FEATURE_WP_UNPOPULATED | \
> + UFFD_FEATURE_POISON)
> #define UFFD_API_IOCTLS \
> ((__u64)1 << _UFFDIO_REGISTER | \
> (__u64)1 << _UFFDIO_UNREGISTER | \
> @@ -49,12 +50,14 @@
> (__u64)1 << _UFFDIO_COPY | \
> (__u64)1 << _UFFDIO_ZEROPAGE | \
> (__u64)1 << _UFFDIO_WRITEPROTECT | \
> - (__u64)1 << _UFFDIO_CONTINUE)
> + (__u64)1 << _UFFDIO_CONTINUE | \
> + (__u64)1 << _UFFDIO_POISON)
> #define UFFD_API_RANGE_IOCTLS_BASIC \
> ((__u64)1 << _UFFDIO_WAKE | \
> (__u64)1 << _UFFDIO_COPY | \
> + (__u64)1 << _UFFDIO_WRITEPROTECT | \
> (__u64)1 << _UFFDIO_CONTINUE | \
> - (__u64)1 << _UFFDIO_WRITEPROTECT)
> + (__u64)1 << _UFFDIO_POISON)

May not be a large deal, but it's still better to declare the feature &
ioctls after all things implemented. Maybe make these few lines
(UFFD_API*, and the new feature bit) as the last patch to enable the
feature?

>
> /*
> * Valid ioctl command number range with this API is from 0x00 to
> @@ -71,6 +74,7 @@
> #define _UFFDIO_ZEROPAGE (0x04)
> #define _UFFDIO_WRITEPROTECT (0x06)
> #define _UFFDIO_CONTINUE (0x07)
> +#define _UFFDIO_POISON (0x08)
> #define _UFFDIO_API (0x3F)
>
> /* userfaultfd ioctl ids */
> @@ -91,6 +95,8 @@
> struct uffdio_writeprotect)
> #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
> struct uffdio_continue)
> +#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
> + struct uffdio_poison)
>
> /* read() structure */
> struct uffd_msg {
> @@ -225,6 +231,7 @@ struct uffdio_api {
> #define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
> #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
> #define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
> +#define UFFD_FEATURE_POISON (1<<14)
> __u64 features;
>
> __u64 ioctls;
> @@ -321,6 +328,18 @@ struct uffdio_continue {
> __s64 mapped;
> };
>
> +struct uffdio_poison {
> + struct uffdio_range range;
> +#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
> + __u64 mode;
> +
> + /*
> + * Fields below here are written by the ioctl and must be at the end:
> + * the copy_from_user will not read past here.
> + */
> + __s64 updated;
> +};
> +
> /*
> * Flags for the userfaultfd(2) system call itself.
> */
> diff --git a/mm/memory.c b/mm/memory.c
> index d8a9a770b1f1..7fbda39e060d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3692,6 +3692,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
> if (WARN_ON_ONCE(!marker))
> return VM_FAULT_SIGBUS;
>
> + /* Poison emulation explicitly requested for this PTE. */
> + if (marker & PTE_MARKER_UFFD_POISON)
> + return VM_FAULT_HWPOISON;
> +
> /* Higher priority than uffd-wp when data corrupted */
> if (marker & PTE_MARKER_SWAPIN_ERROR)
> return VM_FAULT_SIGBUS;
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index a2bf37ee276d..87b62ca1e09e 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -286,6 +286,51 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
> goto out;
> }
>
> +/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
> +static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
> + struct vm_area_struct *dst_vma,
> + unsigned long dst_addr,
> + uffd_flags_t flags)
> +{
> + int ret;
> + struct mm_struct *dst_mm = dst_vma->vm_mm;
> + pte_t _dst_pte, *dst_pte;
> + spinlock_t *ptl;
> +
> + _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
> + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
> +
> + if (vma_is_shmem(dst_vma)) {
> + struct inode *inode;
> + pgoff_t offset, max_off;
> +
> + /* serialize against truncate with the page table lock */
> + inode = dst_vma->vm_file->f_inode;
> + offset = linear_page_index(dst_vma, dst_addr);
> + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
> + ret = -EFAULT;
> + if (unlikely(offset >= max_off))
> + goto out_unlock;
> + }

Maybe good chance to have a mfill_file_over_size() helper? Other potential
users are mfill_atomic_pte_zeropage() and mfill_atomic_install_pte().
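
Something like this untested sketch, lifted from the check above:

static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
                                 unsigned long dst_addr)
{
        struct inode *inode;
        pgoff_t offset, max_off;

        if (!vma_is_shmem(dst_vma))
                return false;

        /* caller holds the page table lock to serialize against truncate */
        inode = dst_vma->vm_file->f_inode;
        offset = linear_page_index(dst_vma, dst_addr);
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        return offset >= max_off;
}

and the callers would just do:

        if (mfill_file_over_size(dst_vma, dst_addr)) {
                ret = -EFAULT;
                goto out_unlock;
        }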

> +
> + ret = -EEXIST;
> + /*
> + * For now, we don't handle unmapping pages, so only support filling in
> + * none PTEs, or replacing PTE markers.
> + */
> + if (!pte_none_mostly(*dst_pte))
> + goto out_unlock;
> +
> + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
> +
> + /* No need to invalidate - it was non-present before */
> + update_mmu_cache(dst_vma, dst_addr, dst_pte);
> + ret = 0;
> +out_unlock:
> + pte_unmap_unlock(dst_pte, ptl);
> + return ret;
> +}
> +
> static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
> {
> pgd_t *pgd;
> @@ -336,8 +381,12 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> * supported by hugetlb. A PMD_SIZE huge pages may exist as used
> * by THP. Since we can not reliably insert a zero page, this
> * feature is not supported.
> + *
> + * PTE marker handling for hugetlb is a bit special, so for now
> + * UFFDIO_POISON is not supported.
> */
> - if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
> + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
> + uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> mmap_read_unlock(dst_mm);
> return -EINVAL;
> }
> @@ -481,6 +530,9 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
> if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
> return mfill_atomic_pte_continue(dst_pmd, dst_vma,
> dst_addr, flags);
> + } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> + return mfill_atomic_pte_poison(dst_pmd, dst_vma,
> + dst_addr, flags);
> }
>
> /*
> @@ -702,6 +754,14 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
> uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
> }
>
> +ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags)
> +{
> + return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
> + uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
> +}
> +
> long uffd_wp_range(struct vm_area_struct *dst_vma,
> unsigned long start, unsigned long len, bool enable_wp)
> {
> --
> 2.41.0.255.g8b1d071c50-goog
>

--
Peter Xu


2023-07-04 21:05:56

by Peter Xu

Subject: Re: [PATCH v2 2/6] mm: userfaultfd: refactor hugetlb folio allocation / lookup code

On Thu, Jun 29, 2023 at 01:50:36PM -0700, Axel Rasmussen wrote:
> At the top of `hugetlb_mfill_atomic_pte`, we need to get the folio we're
> going to be mapping. There are three basic cases we're dealing with
> here:
>
> 1. We're doing a UFFDIO_CONTINUE, in which case we lookup an existing
> folio in the pagecache, instead of allocating a new one.
> 2. We need to allocate a new folio.
> 3. We previously failed while populating our new folio, so we "returned"
> a temporary folio using `foliop` and had our caller retry.
>
> In a future commit I'm going to add a fourth case for UFFDIO_POISON,
> where we aren't going to map a folio at all (newly allocated or
> otherwise). This end state will be simpler, and we can re-use a bit more
> code, if we stop using `if (...)` to distinguish the cases.
>
> So, refactor the cases so they share most of the same code, and instead
> switch to `goto` to skip some parts depending on the case at hand.
>
> Signed-off-by: Axel Rasmussen <[email protected]>

I didn't get why this patch is needed..

IIUC you added MFILL_ATOMIC_POISON handling at the entry of
hugetlb_mfill_atomic_pte() anyway. Maybe it can even have its own
hugetlb_mfill_atomic_poison()? Did I miss something?

> ---
> mm/hugetlb.c | 53 +++++++++++++++++++++++++---------------------------
> 1 file changed, 25 insertions(+), 28 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index bce28cca73a1..38711d49e4db 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6259,22 +6259,32 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> if (IS_ERR(folio))
> goto out;
> folio_in_pagecache = true;
> - } else if (!*foliop) {
> - /* If a folio already exists, then it's UFFDIO_COPY for
> - * a non-missing case. Return -EEXIST.
> - */
> - if (vm_shared &&
> - hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> - ret = -EEXIST;
> - goto out;
> + goto ready;
> + }
> +
> + /* If a folio already exists, then it's UFFDIO_COPY for
> + * a non-missing case. Return -EEXIST.
> + */
> + if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> + ret = -EEXIST;
> + if (*foliop) {
> + folio_put(*foliop);
> + *foliop = NULL;
> }
> + goto out;
> + }
>
> - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> - if (IS_ERR(folio)) {
> - ret = -ENOMEM;
> - goto out;
> + folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> + if (IS_ERR(folio)) {
> + ret = -ENOMEM;
> + if (*foliop) {
> + folio_put(*foliop);
> + *foliop = NULL;
> }
> + goto out;
> + }
>
> + if (!*foliop) {
> ret = copy_folio_from_user(folio, (const void __user *) src_addr,
> false);
>
> @@ -6302,22 +6312,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> */
> goto out;
> }
> - } else {
> - if (vm_shared &&
> - hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> - folio_put(*foliop);
> - ret = -EEXIST;
> - *foliop = NULL;
> - goto out;
> - }
> -
> - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> - if (IS_ERR(folio)) {
> - folio_put(*foliop);
> - ret = -ENOMEM;
> - *foliop = NULL;
> - goto out;
> - }
> + } else { /* Caller retried because we set *foliop previously */
> ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
> folio_put(*foliop);
> *foliop = NULL;
> @@ -6327,6 +6322,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> }
> }
>
> +ready: /* `folio` ready to map (non-NULL, populated) */
> +
> /*
> * The memory barrier inside __folio_mark_uptodate makes sure that
> * preceding stores to the page contents become visible before
> --
> 2.41.0.255.g8b1d071c50-goog
>

--
Peter Xu


2023-07-04 21:13:01

by Peter Xu

Subject: Re: [PATCH v2 4/6] selftests/mm: refactor uffd_poll_thread to allow custom fault handlers

On Thu, Jun 29, 2023 at 01:50:38PM -0700, Axel Rasmussen wrote:
> Previously, we had "one fault handler to rule them all", which used
> several branches to deal with all of the scenarios required by all of
> the various tests.
>
> In upcoming patches, I plan to add a new test, which has its own
> slightly different fault handling logic. Instead of continuing to add
> cruft to the existing fault handler, let's allow tests to define custom
> ones, separate from other tests.
>
> Signed-off-by: Axel Rasmussen <[email protected]>
> ---
> tools/testing/selftests/mm/uffd-common.c | 5 ++++-
> tools/testing/selftests/mm/uffd-common.h | 3 +++
> tools/testing/selftests/mm/uffd-stress.c | 12 +++++++-----
> 3 files changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
> index ba20d7504022..02b89860e193 100644
> --- a/tools/testing/selftests/mm/uffd-common.c
> +++ b/tools/testing/selftests/mm/uffd-common.c
> @@ -499,6 +499,9 @@ void *uffd_poll_thread(void *arg)
> int ret;
> char tmp_chr;
>
> + if (!args->handle_fault)
> + args->handle_fault = uffd_handle_page_fault;
> +
> pollfd[0].fd = uffd;
> pollfd[0].events = POLLIN;
> pollfd[1].fd = pipefd[cpu*2];
> @@ -527,7 +530,7 @@ void *uffd_poll_thread(void *arg)
> err("unexpected msg event %u\n", msg.event);
> break;
> case UFFD_EVENT_PAGEFAULT:
> - uffd_handle_page_fault(&msg, args);
> + args->handle_fault(&msg, args);
> break;
> case UFFD_EVENT_FORK:
> close(uffd);
> diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
> index 197f5262fe0d..7c4fa964c3b0 100644
> --- a/tools/testing/selftests/mm/uffd-common.h
> +++ b/tools/testing/selftests/mm/uffd-common.h
> @@ -77,6 +77,9 @@ struct uffd_args {
> unsigned long missing_faults;
> unsigned long wp_faults;
> unsigned long minor_faults;
> +
> + /* A custom fault handler; defaults to uffd_handle_page_fault. */
> + void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args);
> };
>
> struct uffd_test_ops {
> diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
> index 995ff13e74c7..50b1224d72c7 100644
> --- a/tools/testing/selftests/mm/uffd-stress.c
> +++ b/tools/testing/selftests/mm/uffd-stress.c
> @@ -189,10 +189,8 @@ static int stress(struct uffd_args *args)
> locking_thread, (void *)cpu))
> return 1;
> if (bounces & BOUNCE_POLL) {
> - if (pthread_create(&uffd_threads[cpu], &attr,
> - uffd_poll_thread,
> - (void *)&args[cpu]))
> - return 1;
> + if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
> + err("uffd_poll_thread create");

irrelevant change?

> } else {
> if (pthread_create(&uffd_threads[cpu], &attr,
> uffd_read_thread,
> @@ -247,9 +245,13 @@ static int userfaultfd_stress(void)
> {
> void *area;
> unsigned long nr;
> - struct uffd_args args[nr_cpus];
> + struct uffd_args *args;
> uint64_t mem_size = nr_pages * page_size;
>
> + args = calloc(nr_cpus, sizeof(struct uffd_args));
> + if (!args)
> + err("allocating args array failed");
> +

It's leaked?

Isn't "args[] = { 0 }" already working?

Thanks,

> if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
> err("context init failed");
>
> --
> 2.41.0.255.g8b1d071c50-goog
>

--
Peter Xu


2023-07-04 21:37:13

by Peter Xu

Subject: Re: [PATCH v2 3/6] mm: userfaultfd: support UFFDIO_POISON for hugetlbfs

On Thu, Jun 29, 2023 at 01:50:37PM -0700, Axel Rasmussen wrote:
> The behavior here is the same as it is for anon/shmem. This is done
> separately because hugetlb pte marker handling is a bit different.
>
> Signed-off-by: Axel Rasmussen <[email protected]>
> ---
> mm/hugetlb.c | 33 +++++++++++++++++++++++++++++++--
> mm/userfaultfd.c | 6 +-----
> 2 files changed, 32 insertions(+), 7 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 38711d49e4db..05abe88986b6 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6090,14 +6090,24 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
> }
>
> entry = huge_ptep_get(ptep);
> - /* PTE markers should be handled the same way as none pte */
> - if (huge_pte_none_mostly(entry))
> + if (huge_pte_none_mostly(entry)) {
> + if (is_pte_marker(entry)) {
> + unsigned long marker = pte_marker_get(pte_to_swp_entry(entry));
> +
> + if (marker & PTE_MARKER_UFFD_POISON) {
> + ret = VM_FAULT_HWPOISON_LARGE;
> + goto out_mutex;
> + }
> + }
> /*
> + * Other PTE markers should be handled the same way as none PTE.
> + *
> * hugetlb_no_page will drop vma lock and hugetlb fault
> * mutex internally, which make us return immediately.
> */
> return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
> entry, flags);
> + }
>
> ret = 0;
>
> @@ -6253,6 +6263,25 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> int writable;
> bool folio_in_pagecache = false;
>
> + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> + ptl = huge_pte_lock(h, dst_mm, dst_pte);
> +
> + /* Don't overwrite any existing PTEs (even markers) */
> + if (!huge_pte_none(huge_ptep_get(dst_pte))) {
> + spin_unlock(ptl);
> + return -EEXIST;
> + }
> +
> + _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
> + set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
> +
> + /* No need to invalidate - it was non-present before */
> + update_mmu_cache(dst_vma, dst_addr, dst_pte);
> +
> + spin_unlock(ptl);
> + return 0;
> + }
> +
> if (is_continue) {
> ret = -EFAULT;
> folio = filemap_lock_folio(mapping, idx);
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 87b62ca1e09e..4436cae1c7a8 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -381,12 +381,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> * supported by hugetlb. A PMD_SIZE huge pages may exist as used
> * by THP. Since we can not reliably insert a zero page, this
> * feature is not supported.
> - *
> - * PTE marker handling for hugetlb is a bit special, so for now
> - * UFFDIO_POISON is not supported.
> */
> - if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
> - uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
> mmap_read_unlock(dst_mm);
> return -EINVAL;

If we have the last patch declaring the feature bits and so on, IIUC we
don't need this change back and forth. Other than that looks good.

Thanks,

> }
> --
> 2.41.0.255.g8b1d071c50-goog
>

--
Peter Xu


2023-07-04 21:57:19

by Peter Xu

Subject: Re: [PATCH v2 5/6] selftests/mm: add uffd unit test for UFFDIO_POISON

On Thu, Jun 29, 2023 at 01:50:39PM -0700, Axel Rasmussen wrote:
> The test is pretty basic, and exercises UFFDIO_POISON straightforwardly.
> We register a region with userfaultfd, in missing fault mode. For each
> fault, we either UFFDIO_COPY a zeroed page (odd pages) or UFFDIO_POISON
> (even pages). We do this mix to test "something like a real use case",
> where guest memory would be some mix of poisoned and non-poisoned pages.
>
> We read each page in the region, and assert that the odd pages are
> zeroed as expected, and the even pages yield a SIGBUS as expected.
>
> Why UFFDIO_COPY instead of UFFDIO_ZEROPAGE? Because hugetlb doesn't
> support UFFDIO_ZEROPAGE, and we don't want to have special case code.
>
> Signed-off-by: Axel Rasmussen <[email protected]>

Acked-by: Peter Xu <[email protected]>

--
Peter Xu


2023-07-04 22:47:45

by Peter Xu

Subject: Re: [PATCH v2 6/6] mm: userfaultfd: add basic documentation for UFFDIO_POISON

On Thu, Jun 29, 2023 at 01:50:40PM -0700, Axel Rasmussen wrote:
> Just describe the feature at a really basic level.
>
> Signed-off-by: Axel Rasmussen <[email protected]>

The final enablement of the feature can be squashed into this doc update
patch too.

Acked-by: Peter Xu <[email protected]>

> ---
> Documentation/admin-guide/mm/userfaultfd.rst | 15 +++++++++++++++
> 1 file changed, 15 insertions(+)
>
> diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
> index 7c304e432205..b19053436369 100644
> --- a/Documentation/admin-guide/mm/userfaultfd.rst
> +++ b/Documentation/admin-guide/mm/userfaultfd.rst
> @@ -244,6 +244,21 @@ write-protected (so future writes will also result in a WP fault). These ioctls
> support a mode flag (``UFFDIO_COPY_MODE_WP`` or ``UFFDIO_CONTINUE_MODE_WP``
> respectively) to configure the mapping this way.
>
> +Memory Poisoning Emulation
> +---------------------------
> +
> +In response to a fault (either missing or minor), an action userspace can
> +take to "resolve" it is to issue a ``UFFDIO_POISON``. This will cause any
> +future faulters to either get a SIGBUS, or in KVM's case the guest will
> +receive an MCE as if there were hardware memory poisoning.
> +
> +This is used to emulate hardware memory poisoning. Imagine a VM running on a
> +machine which experiences a real hardware memory error. Later, we live migrate
> +the VM to another physical machine. Since we want the migration to be
> +transparent to the guest, we want that same address range to act as if it was
> +still poisoned, even though it's on a new physical host which ostentisbly
> +doesn't have a memory error in the exact same spot.
> +
> QEMU/KVM
> ========
>
> --
> 2.41.0.255.g8b1d071c50-goog
>

--
Peter Xu


2023-07-05 16:40:54

by Peter Xu

Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Wed, Jul 05, 2023 at 09:09:19AM -0700, James Houghton wrote:
> > > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> > > index 4c932cb45e0b..8259fee32421 100644
> > > --- a/include/linux/swapops.h
> > > +++ b/include/linux/swapops.h
> > > @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
> > >
> > > #define PTE_MARKER_UFFD_WP BIT(0)
> > > #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> > > -#define PTE_MARKER_MASK (BIT(2) - 1)
> > > +#define PTE_MARKER_UFFD_POISON BIT(2)
> >
> > One more tab.
> >
> > Though I remembered the last time we discussed IIRC we plan to rename
> > SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?
> >
> > I think I commented this but I'll do it again: IIUC any existing host
> > swapin errors for guest pages should be reported as MCE too, afaict,
> > happened in kvm context.
>
> I think swapin errors are treated differently than poison. Swapin
> errors get VM_FAULT_SIGBUS, and poison gets VM_FAULT_HWPOISON, so
> UFFDIO_POISON should also get VM_FAULT_HWPOISON (so that's what Axel
> has implemented). And I think that needs a separate PTE marker.

My question was, should we also make SWAPIN_ERROR return VM_FAULT_HWPOISON
always?

Just to recap from what I already commented above - if a guest page got
error in swapin due to block sector failures, it should be treated as
VM_FAULT_HWPOISON too, IMHO. IOW, I think current SWAPIN_ERROR is wrong
when in kvm context and we should fix it first.

>
> >
> > > +#define PTE_MARKER_MASK (BIT(3) - 1)
> > >
> > > static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
> > > {
> > > diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> > > index ac7b0c96d351..ac8c6854097c 100644
> > > --- a/include/linux/userfaultfd_k.h
> > > +++ b/include/linux/userfaultfd_k.h
> > > @@ -46,6 +46,7 @@ enum mfill_atomic_mode {
> > > MFILL_ATOMIC_COPY,
> > > MFILL_ATOMIC_ZEROPAGE,
> > > MFILL_ATOMIC_CONTINUE,
> > > + MFILL_ATOMIC_POISON,
> > > NR_MFILL_ATOMIC_MODES,
> > > };
> > >
> > > @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
> > > extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
> > > unsigned long len, atomic_t *mmap_changing,
> > > uffd_flags_t flags);
> > > +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> > > + unsigned long len, atomic_t *mmap_changing,
> > > + uffd_flags_t flags);
> > > extern int mwriteprotect_range(struct mm_struct *dst_mm,
> > > unsigned long start, unsigned long len,
> > > bool enable_wp, atomic_t *mmap_changing);
> > > diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
> > > index 66dd4cd277bd..62151706c5a3 100644
> > > --- a/include/uapi/linux/userfaultfd.h
> > > +++ b/include/uapi/linux/userfaultfd.h
> > > @@ -39,7 +39,8 @@
> > > UFFD_FEATURE_MINOR_SHMEM | \
> > > UFFD_FEATURE_EXACT_ADDRESS | \
> > > UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
> > > - UFFD_FEATURE_WP_UNPOPULATED)
> > > + UFFD_FEATURE_WP_UNPOPULATED | \
> > > + UFFD_FEATURE_POISON)
> > > #define UFFD_API_IOCTLS \
> > > ((__u64)1 << _UFFDIO_REGISTER | \
> > > (__u64)1 << _UFFDIO_UNREGISTER | \
> > > @@ -49,12 +50,14 @@
> > > (__u64)1 << _UFFDIO_COPY | \
> > > (__u64)1 << _UFFDIO_ZEROPAGE | \
> > > (__u64)1 << _UFFDIO_WRITEPROTECT | \
> > > - (__u64)1 << _UFFDIO_CONTINUE)
> > > + (__u64)1 << _UFFDIO_CONTINUE | \
> > > + (__u64)1 << _UFFDIO_POISON)
> > > #define UFFD_API_RANGE_IOCTLS_BASIC \
> > > ((__u64)1 << _UFFDIO_WAKE | \
> > > (__u64)1 << _UFFDIO_COPY | \
> > > + (__u64)1 << _UFFDIO_WRITEPROTECT | \
> > > (__u64)1 << _UFFDIO_CONTINUE | \
> > > - (__u64)1 << _UFFDIO_WRITEPROTECT)
> > > + (__u64)1 << _UFFDIO_POISON)
> >
> > May not be a large deal, but it's still better to declare the feature &
> > ioctls after all things implemented. Maybe make these few lines
> > (UFFD_API*, and the new feature bit) as the last patch to enable the
> > feature?
>
> I agree. Another option would be to have a separate feature for
> UFFDIO_POISON for hugetlb, but I don't think we should do that. :)

Yeah let's make the features "memory-type-free" if possible. :)

Thanks,

--
Peter Xu


2023-07-05 16:41:18

by James Houghton

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Tue, Jul 4, 2023 at 1:38 PM Peter Xu <[email protected]> wrote:
>
> On Thu, Jun 29, 2023 at 01:50:35PM -0700, Axel Rasmussen wrote:
> > The basic idea here is to "simulate" memory poisoning for VMs. A VM
> > running on some host might encounter a memory error, after which some
> > page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
> > once poisoned, pages can never become "un-poisoned". So, when we live
> > migrate the VM, we need to preserve the poisoned status of these pages.
> >
> > When live migrating, we try to get the guest running on its new host as
> > quickly as possible. So, we start it running before all memory has been
> > copied, and before we're certain which pages should be poisoned or not.
> >
> > So the basic way to use this new feature is:
> >
> > - On the new host, the guest's memory is registered with userfaultfd, in
> > either MISSING or MINOR mode (doesn't really matter for this purpose).
> > - On any first access, we get a userfaultfd event. At this point we can
> > communicate with the old host to find out if the page was poisoned.
> > - If so, we can respond with a UFFDIO_POISON - this places a swap marker
> > so any future accesses will SIGBUS. Because the pte is now "present",
> > future accesses won't generate more userfaultfd events, they'll just
> > SIGBUS directly.
> >
> > UFFDIO_POISON does not handle unmapping previously-present PTEs. This
> > isn't needed, because during live migration we want to intercept
> > all accesses with userfaultfd (not just writes, so WP mode isn't useful
> > for this). So whether minor or missing mode is being used (or both), the
> > PTE won't be present in any case, so handling that case isn't needed.
> >
> > Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
> > these markers is encountered? For "normal" userspace programs there
> > isn't a big difference, both yield a SIGBUS. The difference for KVM is
> > key though: VM_FAULT_HWPOISON will result in an MCE being injected into
> > the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
> > hypervisor would need to catch the SIGBUS and deal with the MCE
> > injection itself.
> >
> > Signed-off-by: Axel Rasmussen <[email protected]>
>
> Maybe have a cover letter for the next version?
>
> > ---
> > fs/userfaultfd.c | 63 ++++++++++++++++++++++++++++++++
> > include/linux/swapops.h | 3 +-
> > include/linux/userfaultfd_k.h | 4 ++
> > include/uapi/linux/userfaultfd.h | 25 +++++++++++--
> > mm/memory.c | 4 ++
> > mm/userfaultfd.c | 62 ++++++++++++++++++++++++++++++-
> > 6 files changed, 156 insertions(+), 5 deletions(-)
> >
> > diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> > index 7cecd49e078b..c26a883399c9 100644
> > --- a/fs/userfaultfd.c
> > +++ b/fs/userfaultfd.c
> > @@ -1965,6 +1965,66 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
> > return ret;
> > }
> >
> > +static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
> > +{
> > + __s64 ret;
> > + struct uffdio_poison uffdio_poison;
> > + struct uffdio_poison __user *user_uffdio_poison;
> > + struct userfaultfd_wake_range range;
> > +
> > + user_uffdio_poison = (struct uffdio_poison __user *)arg;
> > +
> > + ret = -EAGAIN;
> > + if (atomic_read(&ctx->mmap_changing))
> > + goto out;
> > +
> > + ret = -EFAULT;
> > + if (copy_from_user(&uffdio_poison, user_uffdio_poison,
> > + /* don't copy the output fields */
> > + sizeof(uffdio_poison) - (sizeof(__s64))))
> > + goto out;
> > +
> > + ret = validate_range(ctx->mm, uffdio_poison.range.start,
> > + uffdio_poison.range.len);
> > + if (ret)
> > + goto out;
> > +
> > + ret = -EINVAL;
> > + /* double check for wraparound just in case. */
> > + if (uffdio_poison.range.start + uffdio_poison.range.len <=
> > + uffdio_poison.range.start) {
>
> Brackets not needed here.
>
> > + goto out;
> > + }
> > + if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
> > + goto out;
> > +
> > + if (mmget_not_zero(ctx->mm)) {
> > + ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
> > + uffdio_poison.range.len,
> > + &ctx->mmap_changing, 0);
> > + mmput(ctx->mm);
> > + } else {
> > + return -ESRCH;
> > + }
> > +
> > + if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
> > + return -EFAULT;
> > + if (ret < 0)
> > + goto out;
> > +
> > + /* len == 0 would wake all */
> > + BUG_ON(!ret);
> > + range.len = ret;
> > + if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
> > + range.start = uffdio_poison.range.start;
> > + wake_userfault(ctx, &range);
> > + }
> > + ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
> > +
> > +out:
> > + return ret;
> > +}
> > +
> > static inline unsigned int uffd_ctx_features(__u64 user_features)
> > {
> > /*
> > @@ -2066,6 +2126,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
> > case UFFDIO_CONTINUE:
> > ret = userfaultfd_continue(ctx, arg);
> > break;
> > + case UFFDIO_POISON:
> > + ret = userfaultfd_poison(ctx, arg);
> > + break;
> > }
> > return ret;
> > }
> > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> > index 4c932cb45e0b..8259fee32421 100644
> > --- a/include/linux/swapops.h
> > +++ b/include/linux/swapops.h
> > @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
> >
> > #define PTE_MARKER_UFFD_WP BIT(0)
> > #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> > -#define PTE_MARKER_MASK (BIT(2) - 1)
> > +#define PTE_MARKER_UFFD_POISON BIT(2)
>
> One more tab.
>
> Though I remembered the last time we discussed IIRC we plan to rename
> SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?
>
> I think I commented this but I'll do it again: IIUC any existing host
> swapin errors for guest pages should be reported as MCE too, afaict,
> happened in kvm context.

I think swapin errors are treated differently than poison. Swapin
errors get VM_FAULT_SIGBUS, and poison gets VM_FAULT_HWPOISON, so
UFFDIO_POISON should also get VM_FAULT_HWPOISON (so that's what Axel
has implemented). And I think that needs a separate PTE marker.

>
> > +#define PTE_MARKER_MASK (BIT(3) - 1)
> >
> > static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
> > {
> > diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> > index ac7b0c96d351..ac8c6854097c 100644
> > --- a/include/linux/userfaultfd_k.h
> > +++ b/include/linux/userfaultfd_k.h
> > @@ -46,6 +46,7 @@ enum mfill_atomic_mode {
> > MFILL_ATOMIC_COPY,
> > MFILL_ATOMIC_ZEROPAGE,
> > MFILL_ATOMIC_CONTINUE,
> > + MFILL_ATOMIC_POISON,
> > NR_MFILL_ATOMIC_MODES,
> > };
> >
> > @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
> > extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
> > unsigned long len, atomic_t *mmap_changing,
> > uffd_flags_t flags);
> > +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> > + unsigned long len, atomic_t *mmap_changing,
> > + uffd_flags_t flags);
> > extern int mwriteprotect_range(struct mm_struct *dst_mm,
> > unsigned long start, unsigned long len,
> > bool enable_wp, atomic_t *mmap_changing);
> > diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
> > index 66dd4cd277bd..62151706c5a3 100644
> > --- a/include/uapi/linux/userfaultfd.h
> > +++ b/include/uapi/linux/userfaultfd.h
> > @@ -39,7 +39,8 @@
> > UFFD_FEATURE_MINOR_SHMEM | \
> > UFFD_FEATURE_EXACT_ADDRESS | \
> > UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
> > - UFFD_FEATURE_WP_UNPOPULATED)
> > + UFFD_FEATURE_WP_UNPOPULATED | \
> > + UFFD_FEATURE_POISON)
> > #define UFFD_API_IOCTLS \
> > ((__u64)1 << _UFFDIO_REGISTER | \
> > (__u64)1 << _UFFDIO_UNREGISTER | \
> > @@ -49,12 +50,14 @@
> > (__u64)1 << _UFFDIO_COPY | \
> > (__u64)1 << _UFFDIO_ZEROPAGE | \
> > (__u64)1 << _UFFDIO_WRITEPROTECT | \
> > - (__u64)1 << _UFFDIO_CONTINUE)
> > + (__u64)1 << _UFFDIO_CONTINUE | \
> > + (__u64)1 << _UFFDIO_POISON)
> > #define UFFD_API_RANGE_IOCTLS_BASIC \
> > ((__u64)1 << _UFFDIO_WAKE | \
> > (__u64)1 << _UFFDIO_COPY | \
> > + (__u64)1 << _UFFDIO_WRITEPROTECT | \
> > (__u64)1 << _UFFDIO_CONTINUE | \
> > - (__u64)1 << _UFFDIO_WRITEPROTECT)
> > + (__u64)1 << _UFFDIO_POISON)
>
> May not be a large deal, but it's still better to declare the feature &
> ioctls after all things implemented. Maybe make these few lines
> (UFFD_API*, and the new feature bit) as the last patch to enable the
> feature?

I agree. Another option would be to have a separate feature for
UFFDIO_POISON for hugetlb, but I don't think we should do that. :)

>
> >
> > /*
> > * Valid ioctl command number range with this API is from 0x00 to
> > @@ -71,6 +74,7 @@
> > #define _UFFDIO_ZEROPAGE (0x04)
> > #define _UFFDIO_WRITEPROTECT (0x06)
> > #define _UFFDIO_CONTINUE (0x07)
> > +#define _UFFDIO_POISON (0x08)
> > #define _UFFDIO_API (0x3F)
> >
> > /* userfaultfd ioctl ids */
> > @@ -91,6 +95,8 @@
> > struct uffdio_writeprotect)
> > #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
> > struct uffdio_continue)
> > +#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
> > + struct uffdio_poison)
> >
> > /* read() structure */
> > struct uffd_msg {
> > @@ -225,6 +231,7 @@ struct uffdio_api {
> > #define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
> > #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
> > #define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
> > +#define UFFD_FEATURE_POISON (1<<14)
> > __u64 features;
> >
> > __u64 ioctls;
> > @@ -321,6 +328,18 @@ struct uffdio_continue {
> > __s64 mapped;
> > };
> >
> > +struct uffdio_poison {
> > + struct uffdio_range range;
> > +#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
> > + __u64 mode;
> > +
> > + /*
> > + * Fields below here are written by the ioctl and must be at the end:
> > + * the copy_from_user will not read past here.
> > + */
> > + __s64 updated;
> > +};
> > +
> > /*
> > * Flags for the userfaultfd(2) system call itself.
> > */
> > diff --git a/mm/memory.c b/mm/memory.c
> > index d8a9a770b1f1..7fbda39e060d 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3692,6 +3692,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
> > if (WARN_ON_ONCE(!marker))
> > return VM_FAULT_SIGBUS;
> >
> > + /* Poison emulation explicitly requested for this PTE. */
> > + if (marker & PTE_MARKER_UFFD_POISON)
> > + return VM_FAULT_HWPOISON;
> > +
> > /* Higher priority than uffd-wp when data corrupted */
> > if (marker & PTE_MARKER_SWAPIN_ERROR)
> > return VM_FAULT_SIGBUS;
> > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > index a2bf37ee276d..87b62ca1e09e 100644
> > --- a/mm/userfaultfd.c
> > +++ b/mm/userfaultfd.c
> > @@ -286,6 +286,51 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
> > goto out;
> > }
> >
> > +/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
> > +static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
> > + struct vm_area_struct *dst_vma,
> > + unsigned long dst_addr,
> > + uffd_flags_t flags)
> > +{
> > + int ret;
> > + struct mm_struct *dst_mm = dst_vma->vm_mm;
> > + pte_t _dst_pte, *dst_pte;
> > + spinlock_t *ptl;
> > +
> > + _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
> > + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
> > +
> > + if (vma_is_shmem(dst_vma)) {
> > + struct inode *inode;
> > + pgoff_t offset, max_off;
> > +
> > + /* serialize against truncate with the page table lock */
> > + inode = dst_vma->vm_file->f_inode;
> > + offset = linear_page_index(dst_vma, dst_addr);
> > + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
> > + ret = -EFAULT;
> > + if (unlikely(offset >= max_off))
> > + goto out_unlock;
> > + }
>
> Maybe good chance to have a mfill_file_over_size() helper? Other potential
> users are mfill_atomic_pte_zeropage() and mfill_atomic_install_pte().
>
> > +
> > + ret = -EEXIST;
> > + /*
> > + * For now, we don't handle unmapping pages, so only support filling in
> > + * none PTEs, or replacing PTE markers.
> > + */
> > + if (!pte_none_mostly(*dst_pte))
> > + goto out_unlock;
> > +
> > + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
> > +
> > + /* No need to invalidate - it was non-present before */
> > + update_mmu_cache(dst_vma, dst_addr, dst_pte);
> > + ret = 0;
> > +out_unlock:
> > + pte_unmap_unlock(dst_pte, ptl);
> > + return ret;
> > +}
> > +
> > static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
> > {
> > pgd_t *pgd;
> > @@ -336,8 +381,12 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> > * supported by hugetlb. A PMD_SIZE huge pages may exist as used
> > * by THP. Since we can not reliably insert a zero page, this
> > * feature is not supported.
> > + *
> > + * PTE marker handling for hugetlb is a bit special, so for now
> > + * UFFDIO_POISON is not supported.
> > */
> > - if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
> > + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
> > + uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> > mmap_read_unlock(dst_mm);
> > return -EINVAL;
> > }
> > @@ -481,6 +530,9 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
> > if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
> > return mfill_atomic_pte_continue(dst_pmd, dst_vma,
> > dst_addr, flags);
> > + } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> > + return mfill_atomic_pte_poison(dst_pmd, dst_vma,
> > + dst_addr, flags);
> > }
> >
> > /*
> > @@ -702,6 +754,14 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
> > uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
> > }
> >
> > +ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> > + unsigned long len, atomic_t *mmap_changing,
> > + uffd_flags_t flags)
> > +{
> > + return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
> > + uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
> > +}
> > +
> > long uffd_wp_range(struct vm_area_struct *dst_vma,
> > unsigned long start, unsigned long len, bool enable_wp)
> > {
> > --
> > 2.41.0.255.g8b1d071c50-goog
> >
>
> --
> Peter Xu
>

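For reference, the mfill_file_over_size() helper suggested in the review above
could look roughly like the sketch below (the name comes from the review
comment; the exact signature is assumed). It factors out the shmem size check
that mfill_atomic_pte_poison(), mfill_atomic_pte_zeropage() and
mfill_atomic_install_pte() would otherwise duplicate:

    static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
                                     unsigned long dst_addr)
    {
            struct inode *inode;
            pgoff_t offset, max_off;

            if (!vma_is_shmem(dst_vma))
                    return false;

            /* Caller holds the page table lock to serialize against truncate. */
            inode = dst_vma->vm_file->f_inode;
            offset = linear_page_index(dst_vma, dst_addr);
            max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
            return offset >= max_off;
    }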
2023-07-05 16:42:40

by James Houghton

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Thu, Jun 29, 2023 at 1:50 PM Axel Rasmussen <[email protected]> wrote:
>
> The basic idea here is to "simulate" memory poisoning for VMs. A VM
> running on some host might encounter a memory error, after which some
> page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
> once poisoned, pages can never become "un-poisoned". So, when we live
> migrate the VM, we need to preserve the poisoned status of these pages.
>
> When live migrating, we try to get the guest running on its new host as
> quickly as possible. So, we start it running before all memory has been
> copied, and before we're certain which pages should be poisoned or not.
>
> So the basic way to use this new feature is:
>
> - On the new host, the guest's memory is registered with userfaultfd, in
> either MISSING or MINOR mode (doesn't really matter for this purpose).
> - On any first access, we get a userfaultfd event. At this point we can
> communicate with the old host to find out if the page was poisoned.
> - If so, we can respond with a UFFDIO_POISON - this places a swap marker
> so any future accesses will SIGBUS. Because the pte is now "present",
> future accesses won't generate more userfaultfd events, they'll just
> SIGBUS directly.
>
> UFFDIO_POISON does not handle unmapping previously-present PTEs. This
> isn't needed, because during live migration we want to intercept
> all accesses with userfaultfd (not just writes, so WP mode isn't useful
> for this). So whether minor or missing mode is being used (or both), the
> PTE won't be present in any case, so handling that case isn't needed.
>
> Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
> these markers is encountered? For "normal" userspace programs there
> isn't a big difference, both yield a SIGBUS. The difference for KVM is
> key though: VM_FAULT_HWPOISON will result in an MCE being injected into
> the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
> hypervisor would need to catch the SIGBUS and deal with the MCE
> injection itself.

For a normal userspace program, the si_code will be different
(BUS_MCEERR_AR vs. BUS_ADRERR), maybe that's worth mentioning?

Also KVM doesn't inject an MCE automatically. When KVM_RUN gets
-EHWPOISON back from GUP (for x86 and arm64 anyway), KVM will queue up
a BUS_MCEERR_AR signal for userspace and return 0 to userspace. Then
userspace may choose to inject an MCE. If we gave back VM_FAULT_SIGBUS
instead of VM_FAULT_HWPOISON, KVM_RUN would instead return -EFAULT to
userspace without queueing a signal. Today, without the signal, it
becomes impossible (I think) to determine what the appropriate MCE to
inject is.
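
To make the userspace side concrete, a hypervisor catching the SIGBUS itself
would need an SA_SIGINFO handler and a si_code check, roughly (untested
sketch):

    #define _GNU_SOURCE    /* for BUS_MCEERR_AR */
    #include <signal.h>

    static volatile sig_atomic_t last_fault_was_poison;

    /* Distinguish (emulated) poison from a plain bad-address SIGBUS; only
     * async-signal-safe work is done here. */
    static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
    {
            last_fault_was_poison = (si->si_code == BUS_MCEERR_AR);
            /* si->si_addr holds the faulting address in both cases. */
    }

    /* Installed with:
     *   struct sigaction sa = { .sa_sigaction = sigbus_handler,
     *                           .sa_flags = SA_SIGINFO };
     *   sigaction(SIGBUS, &sa, NULL);
     */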

>
> Signed-off-by: Axel Rasmussen <[email protected]>
> ---
> fs/userfaultfd.c | 63 ++++++++++++++++++++++++++++++++
> include/linux/swapops.h | 3 +-
> include/linux/userfaultfd_k.h | 4 ++
> include/uapi/linux/userfaultfd.h | 25 +++++++++++--
> mm/memory.c | 4 ++
> mm/userfaultfd.c | 62 ++++++++++++++++++++++++++++++-
> 6 files changed, 156 insertions(+), 5 deletions(-)
>
> diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> index 7cecd49e078b..c26a883399c9 100644
> --- a/fs/userfaultfd.c
> +++ b/fs/userfaultfd.c
> @@ -1965,6 +1965,66 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
> return ret;
> }
>
> +static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
> +{
> + __s64 ret;
> + struct uffdio_poison uffdio_poison;
> + struct uffdio_poison __user *user_uffdio_poison;
> + struct userfaultfd_wake_range range;
> +
> + user_uffdio_poison = (struct uffdio_poison __user *)arg;
> +
> + ret = -EAGAIN;
> + if (atomic_read(&ctx->mmap_changing))
> + goto out;
> +
> + ret = -EFAULT;
> + if (copy_from_user(&uffdio_poison, user_uffdio_poison,
> + /* don't copy the output fields */
> + sizeof(uffdio_poison) - (sizeof(__s64))))
> + goto out;
> +
> + ret = validate_range(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len);
> + if (ret)
> + goto out;
> +
> + ret = -EINVAL;
> + /* double check for wraparound just in case. */
> + if (uffdio_poison.range.start + uffdio_poison.range.len <=
> + uffdio_poison.range.start) {
> + goto out;
> + }
> + if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
> + goto out;
> +
> + if (mmget_not_zero(ctx->mm)) {
> + ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len,
> + &ctx->mmap_changing, 0);
> + mmput(ctx->mm);
> + } else {
> + return -ESRCH;
> + }
> +
> + if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
> + return -EFAULT;
> + if (ret < 0)
> + goto out;
> +
> + /* len == 0 would wake all */
> + BUG_ON(!ret);
> + range.len = ret;
> + if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
> + range.start = uffdio_poison.range.start;
> + wake_userfault(ctx, &range);
> + }
> + ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
> +
> +out:
> + return ret;
> +}
> +
> static inline unsigned int uffd_ctx_features(__u64 user_features)
> {
> /*
> @@ -2066,6 +2126,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
> case UFFDIO_CONTINUE:
> ret = userfaultfd_continue(ctx, arg);
> break;
> + case UFFDIO_POISON:
> + ret = userfaultfd_poison(ctx, arg);
> + break;
> }
> return ret;
> }
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 4c932cb45e0b..8259fee32421 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
>
> #define PTE_MARKER_UFFD_WP BIT(0)
> #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> -#define PTE_MARKER_MASK (BIT(2) - 1)
> +#define PTE_MARKER_UFFD_POISON BIT(2)
> +#define PTE_MARKER_MASK (BIT(3) - 1)
>
> static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
> {
> diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> index ac7b0c96d351..ac8c6854097c 100644
> --- a/include/linux/userfaultfd_k.h
> +++ b/include/linux/userfaultfd_k.h
> @@ -46,6 +46,7 @@ enum mfill_atomic_mode {
> MFILL_ATOMIC_COPY,
> MFILL_ATOMIC_ZEROPAGE,
> MFILL_ATOMIC_CONTINUE,
> + MFILL_ATOMIC_POISON,
> NR_MFILL_ATOMIC_MODES,
> };
>
> @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
> extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
> unsigned long len, atomic_t *mmap_changing,
> uffd_flags_t flags);
> +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags);
> extern int mwriteprotect_range(struct mm_struct *dst_mm,
> unsigned long start, unsigned long len,
> bool enable_wp, atomic_t *mmap_changing);
> diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
> index 66dd4cd277bd..62151706c5a3 100644
> --- a/include/uapi/linux/userfaultfd.h
> +++ b/include/uapi/linux/userfaultfd.h
> @@ -39,7 +39,8 @@
> UFFD_FEATURE_MINOR_SHMEM | \
> UFFD_FEATURE_EXACT_ADDRESS | \
> UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
> - UFFD_FEATURE_WP_UNPOPULATED)
> + UFFD_FEATURE_WP_UNPOPULATED | \
> + UFFD_FEATURE_POISON)
> #define UFFD_API_IOCTLS \
> ((__u64)1 << _UFFDIO_REGISTER | \
> (__u64)1 << _UFFDIO_UNREGISTER | \
> @@ -49,12 +50,14 @@
> (__u64)1 << _UFFDIO_COPY | \
> (__u64)1 << _UFFDIO_ZEROPAGE | \
> (__u64)1 << _UFFDIO_WRITEPROTECT | \
> - (__u64)1 << _UFFDIO_CONTINUE)
> + (__u64)1 << _UFFDIO_CONTINUE | \
> + (__u64)1 << _UFFDIO_POISON)
> #define UFFD_API_RANGE_IOCTLS_BASIC \
> ((__u64)1 << _UFFDIO_WAKE | \
> (__u64)1 << _UFFDIO_COPY | \
> + (__u64)1 << _UFFDIO_WRITEPROTECT | \
> (__u64)1 << _UFFDIO_CONTINUE | \
> - (__u64)1 << _UFFDIO_WRITEPROTECT)
> + (__u64)1 << _UFFDIO_POISON)
>
> /*
> * Valid ioctl command number range with this API is from 0x00 to
> @@ -71,6 +74,7 @@
> #define _UFFDIO_ZEROPAGE (0x04)
> #define _UFFDIO_WRITEPROTECT (0x06)
> #define _UFFDIO_CONTINUE (0x07)
> +#define _UFFDIO_POISON (0x08)
> #define _UFFDIO_API (0x3F)
>
> /* userfaultfd ioctl ids */
> @@ -91,6 +95,8 @@
> struct uffdio_writeprotect)
> #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
> struct uffdio_continue)
> +#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
> + struct uffdio_poison)
>
> /* read() structure */
> struct uffd_msg {
> @@ -225,6 +231,7 @@ struct uffdio_api {
> #define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
> #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
> #define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
> +#define UFFD_FEATURE_POISON (1<<14)
> __u64 features;
>
> __u64 ioctls;
> @@ -321,6 +328,18 @@ struct uffdio_continue {
> __s64 mapped;
> };
>
> +struct uffdio_poison {
> + struct uffdio_range range;
> +#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
> + __u64 mode;
> +
> + /*
> + * Fields below here are written by the ioctl and must be at the end:
> + * the copy_from_user will not read past here.
> + */
> + __s64 updated;
> +};
> +
> /*
> * Flags for the userfaultfd(2) system call itself.
> */
> diff --git a/mm/memory.c b/mm/memory.c
> index d8a9a770b1f1..7fbda39e060d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3692,6 +3692,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
> if (WARN_ON_ONCE(!marker))
> return VM_FAULT_SIGBUS;
>
> + /* Poison emulation explicitly requested for this PTE. */
> + if (marker & PTE_MARKER_UFFD_POISON)
> + return VM_FAULT_HWPOISON;
> +
> /* Higher priority than uffd-wp when data corrupted */
> if (marker & PTE_MARKER_SWAPIN_ERROR)
> return VM_FAULT_SIGBUS;
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index a2bf37ee276d..87b62ca1e09e 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -286,6 +286,51 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
> goto out;
> }
>
> +/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
> +static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
> + struct vm_area_struct *dst_vma,
> + unsigned long dst_addr,
> + uffd_flags_t flags)
> +{
> + int ret;
> + struct mm_struct *dst_mm = dst_vma->vm_mm;
> + pte_t _dst_pte, *dst_pte;
> + spinlock_t *ptl;
> +
> + _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
> + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
> +
> + if (vma_is_shmem(dst_vma)) {
> + struct inode *inode;
> + pgoff_t offset, max_off;
> +
> + /* serialize against truncate with the page table lock */
> + inode = dst_vma->vm_file->f_inode;
> + offset = linear_page_index(dst_vma, dst_addr);
> + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
> + ret = -EFAULT;
> + if (unlikely(offset >= max_off))
> + goto out_unlock;
> + }
> +
> + ret = -EEXIST;
> + /*
> + * For now, we don't handle unmapping pages, so only support filling in
> + * none PTEs, or replacing PTE markers.
> + */
> + if (!pte_none_mostly(*dst_pte))
> + goto out_unlock;
> +
> + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
> +
> + /* No need to invalidate - it was non-present before */
> + update_mmu_cache(dst_vma, dst_addr, dst_pte);
> + ret = 0;
> +out_unlock:
> + pte_unmap_unlock(dst_pte, ptl);
> + return ret;
> +}
> +
> static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
> {
> pgd_t *pgd;
> @@ -336,8 +381,12 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> * supported by hugetlb. A PMD_SIZE huge pages may exist as used
> * by THP. Since we can not reliably insert a zero page, this
> * feature is not supported.
> + *
> + * PTE marker handling for hugetlb is a bit special, so for now
> + * UFFDIO_POISON is not supported.
> */
> - if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
> + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
> + uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> mmap_read_unlock(dst_mm);
> return -EINVAL;
> }
> @@ -481,6 +530,9 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
> if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
> return mfill_atomic_pte_continue(dst_pmd, dst_vma,
> dst_addr, flags);
> + } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> + return mfill_atomic_pte_poison(dst_pmd, dst_vma,
> + dst_addr, flags);
> }
>
> /*
> @@ -702,6 +754,14 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
> uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
> }
>
> +ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags)
> +{
> + return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
> + uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
> +}
> +
> long uffd_wp_range(struct vm_area_struct *dst_vma,
> unsigned long start, unsigned long len, bool enable_wp)
> {
> --
> 2.41.0.255.g8b1d071c50-goog
>

2023-07-05 16:43:01

by James Houghton

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Wed, Jul 5, 2023 at 9:15 AM Peter Xu <[email protected]> wrote:
>
> On Wed, Jul 05, 2023 at 09:09:19AM -0700, James Houghton wrote:
> > > > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> > > > index 4c932cb45e0b..8259fee32421 100644
> > > > --- a/include/linux/swapops.h
> > > > +++ b/include/linux/swapops.h
> > > > @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
> > > >
> > > > #define PTE_MARKER_UFFD_WP BIT(0)
> > > > #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> > > > -#define PTE_MARKER_MASK (BIT(2) - 1)
> > > > +#define PTE_MARKER_UFFD_POISON BIT(2)
> > >
> > > One more tab.
> > >
> > > Though I remembered the last time we discussed IIRC we plan to rename
> > > SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?
> > >
> > > I think I commented this but I'll do it again: IIUC any existing host
> > > swapin errors for guest pages should be reported as MCE too, afaict,
> > > happened in kvm context.
> >
> > I think swapin errors are treated differently than poison. Swapin
> > errors get VM_FAULT_SIGBUS, and poison gets VM_FAULT_HWPOISON, so
> > UFFDIO_POISON should also get VM_FAULT_HWPOISON (so that's what Axel
> > has implemented). And I think that needs a separate PTE marker.
>
> My question was, should we also make SWAPIN_ERROR return VM_FAULT_HWPOISON
> always?
>
> Just to recap from what I already commented above - if a guest page got
> error in swapin due to block sector failures, it should be treated as
> VM_FAULT_HWPOISON too, IMHO. IOW, I think current SWAPIN_ERROR is wrong
> when in kvm context and we should fix it first.

Oh! Yes, I agree, though I'm not familiar enough with the users of
SWAPIN_ERROR to know if we can actually make this change.

2023-07-05 16:46:01

by Peter Xu

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Wed, Jul 05, 2023 at 09:27:15AM -0700, James Houghton wrote:
> On Wed, Jul 5, 2023 at 9:15 AM Peter Xu <[email protected]> wrote:
> >
> > On Wed, Jul 05, 2023 at 09:09:19AM -0700, James Houghton wrote:
> > > > > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> > > > > index 4c932cb45e0b..8259fee32421 100644
> > > > > --- a/include/linux/swapops.h
> > > > > +++ b/include/linux/swapops.h
> > > > > @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
> > > > >
> > > > > #define PTE_MARKER_UFFD_WP BIT(0)
> > > > > #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> > > > > -#define PTE_MARKER_MASK (BIT(2) - 1)
> > > > > +#define PTE_MARKER_UFFD_POISON BIT(2)
> > > >
> > > > One more tab.
> > > >
> > > > Though I remembered the last time we discussed IIRC we plan to rename
> > > > SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?
> > > >
> > > > I think I commented this but I'll do it again: IIUC any existing host
> > > > swapin errors for guest pages should be reported as MCE too, afaict,
> > > > happened in kvm context.
> > >
> > > I think swapin errors are treated differently than poison. Swapin
> > > errors get VM_FAULT_SIGBUS, and poison gets VM_FAULT_HWPOISON, so
> > > UFFDIO_POISON should also get VM_FAULT_HWPOISON (so that's what Axel
> > > has implemented). And I think that needs a separate PTE marker.
> >
> > My question was, should we also make SWAPIN_ERROR return VM_FAULT_HWPOISON
> > always?
> >
> > Just to recap from what I already commented above - if a guest page got
> > error in swapin due to block sector failures, it should be treated as
> > VM_FAULT_HWPOISON too, IMHO. IOW, I think current SWAPIN_ERROR is wrong
> > when in kvm context and we should fix it first.
>
> Oh! Yes, I agree, though I'm not familiar enough with the users of
> SWAPIN_ERROR to know if we can actually make this change.

Miaohe initially proposed this swapin error facility, let's see whether he
can comment; he's already in the cc list.

AFAICT that's the right thing to do, and it shouldn't affect any existing
user of swapin error if there is.

Or say, VM_FAULT_HWPOISON should be the same as VM_FAULT_SIGBUS when not in
kvm context, so it shouldn't change a thing in !kvm, while changing that
should stop kvm from crashing a guest where we shouldn't need to.

--
Peter Xu


2023-07-05 17:55:08

by Axel Rasmussen

[permalink] [raw]
Subject: Re: [PATCH v2 4/6] selftests/mm: refactor uffd_poll_thread to allow custom fault handlers

On Tue, Jul 4, 2023 at 2:03 PM Peter Xu <[email protected]> wrote:
>
> On Thu, Jun 29, 2023 at 01:50:38PM -0700, Axel Rasmussen wrote:
> > Previously, we had "one fault handler to rule them all", which used
> > several branches to deal with all of the scenarios required by all of
> > the various tests.
> >
> > In upcoming patches, I plan to add a new test, which has its own
> > slightly different fault handling logic. Instead of continuing to add
> > cruft to the existing fault handler, let's allow tests to define custom
> > ones, separate from other tests.
> >
> > Signed-off-by: Axel Rasmussen <[email protected]>
> > ---
> > tools/testing/selftests/mm/uffd-common.c | 5 ++++-
> > tools/testing/selftests/mm/uffd-common.h | 3 +++
> > tools/testing/selftests/mm/uffd-stress.c | 12 +++++++-----
> > 3 files changed, 14 insertions(+), 6 deletions(-)
> >
> > diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
> > index ba20d7504022..02b89860e193 100644
> > --- a/tools/testing/selftests/mm/uffd-common.c
> > +++ b/tools/testing/selftests/mm/uffd-common.c
> > @@ -499,6 +499,9 @@ void *uffd_poll_thread(void *arg)
> > int ret;
> > char tmp_chr;
> >
> > + if (!args->handle_fault)
> > + args->handle_fault = uffd_handle_page_fault;
> > +
> > pollfd[0].fd = uffd;
> > pollfd[0].events = POLLIN;
> > pollfd[1].fd = pipefd[cpu*2];
> > @@ -527,7 +530,7 @@ void *uffd_poll_thread(void *arg)
> > err("unexpected msg event %u\n", msg.event);
> > break;
> > case UFFD_EVENT_PAGEFAULT:
> > - uffd_handle_page_fault(&msg, args);
> > + args->handle_fault(&msg, args);
> > break;
> > case UFFD_EVENT_FORK:
> > close(uffd);
> > diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
> > index 197f5262fe0d..7c4fa964c3b0 100644
> > --- a/tools/testing/selftests/mm/uffd-common.h
> > +++ b/tools/testing/selftests/mm/uffd-common.h
> > @@ -77,6 +77,9 @@ struct uffd_args {
> > unsigned long missing_faults;
> > unsigned long wp_faults;
> > unsigned long minor_faults;
> > +
> > + /* A custom fault handler; defaults to uffd_handle_page_fault. */
> > + void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args);
> > };
> >
> > struct uffd_test_ops {
> > diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
> > index 995ff13e74c7..50b1224d72c7 100644
> > --- a/tools/testing/selftests/mm/uffd-stress.c
> > +++ b/tools/testing/selftests/mm/uffd-stress.c
> > @@ -189,10 +189,8 @@ static int stress(struct uffd_args *args)
> > locking_thread, (void *)cpu))
> > return 1;
> > if (bounces & BOUNCE_POLL) {
> > - if (pthread_create(&uffd_threads[cpu], &attr,
> > - uffd_poll_thread,
> > - (void *)&args[cpu]))
> > - return 1;
> > + if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
> > + err("uffd_poll_thread create");
>
> irrelevant change?

Right, I'll revert this. In an earlier version I had a more
substantial change here, and just didn't fully revert it.

>
> > } else {
> > if (pthread_create(&uffd_threads[cpu], &attr,
> > uffd_read_thread,
> > @@ -247,9 +245,13 @@ static int userfaultfd_stress(void)
> > {
> > void *area;
> > unsigned long nr;
> > - struct uffd_args args[nr_cpus];
> > + struct uffd_args *args;
> > uint64_t mem_size = nr_pages * page_size;
> >
> > + args = calloc(nr_cpus, sizeof(struct uffd_args));
> > + if (!args)
> > + err("allocating args array failed");
> > +
>
> It's leaked?
>
> Isn't "args[] = { 0 }" already working?

That works, but GCC can warn in this case (-Wmissing-braces) depending
on the definition of struct uffd_args. I liked switching to calloc
because it avoids any possibility of that even as we add/remove things
to struct uffd_args in the future.

Since it's a selftest and this function is only called exactly once,
it didn't seem worth the code making certain we free it, instead just
leaving it to be cleaned up when the process exits.

>
> Thanks,
>
> > if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
> > err("context init failed");
> >
> > --
> > 2.41.0.255.g8b1d071c50-goog
> >
>
> --
> Peter Xu
>

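As a usage illustration for the selftest hook discussed above, a test would
plug in its own handler roughly like this (sketch; the handler name is made
up, everything else comes from the patch):

    static void poison_test_fault_handler(struct uffd_msg *msg,
                                          struct uffd_args *args)
    {
            /* test-specific fault resolution, e.g. replying with UFFDIO_POISON */
    }

    /* ... in the test body ... */
    struct uffd_args args;
    pthread_t thread;

    memset(&args, 0, sizeof(args));
    args.handle_fault = poison_test_fault_handler;
    /* uffd_poll_thread() falls back to uffd_handle_page_fault() when unset. */
    pthread_create(&thread, NULL, uffd_poll_thread, &args);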
2023-07-05 18:18:31

by Axel Rasmussen

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Wed, Jul 5, 2023 at 9:38 AM Peter Xu <[email protected]> wrote:
>
> On Wed, Jul 05, 2023 at 09:27:15AM -0700, James Houghton wrote:
> > On Wed, Jul 5, 2023 at 9:15 AM Peter Xu <[email protected]> wrote:
> > >
> > > On Wed, Jul 05, 2023 at 09:09:19AM -0700, James Houghton wrote:
> > > > > > diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> > > > > > index 4c932cb45e0b..8259fee32421 100644
> > > > > > --- a/include/linux/swapops.h
> > > > > > +++ b/include/linux/swapops.h
> > > > > > @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
> > > > > >
> > > > > > #define PTE_MARKER_UFFD_WP BIT(0)
> > > > > > #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> > > > > > -#define PTE_MARKER_MASK (BIT(2) - 1)
> > > > > > +#define PTE_MARKER_UFFD_POISON BIT(2)
> > > > >
> > > > > One more tab.
> > > > >
> > > > > Though I remembered the last time we discussed IIRC we plan to rename
> > > > > SWAPIN_ERROR and reuse it, could you explain why a new bit is still needed?
> > > > >
> > > > > I think I commented this but I'll do it again: IIUC any existing host
> > > > > swapin errors for guest pages should be reported as MCE too, afaict,
> > > > > happened in kvm context.
> > > >
> > > > I think swapin errors are treated differently than poison. Swapin
> > > > errors get VM_FAULT_SIGBUS, and poison gets VM_FAULT_HWPOISON, so
> > > > UFFDIO_POISON should also get VM_FAULT_HWPOISON (so that's what Axel
> > > > has implemented). And I think that needs a separate PTE marker.
> > >
> > > My question was, should we also make SWAPIN_ERROR return VM_FAULT_HWPOISON
> > > always?
> > >
> > > Just to recap from what I already commented above - if a guest page got
> > > error in swapin due to block sector failures, it should be treated as
> > > VM_FAULT_HWPOISON too, IMHO. IOW, I think current SWAPIN_ERROR is wrong
> > > when in kvm context and we should fix it first.
> >
> > Oh! Yes, I agree, though I'm not familiar enough with the users of
> > SWAPIN_ERROR to know if we can actually make this change.

Sorry I missed this, I'll take another pass looking at existing
SWAPIN_ERROR uses, and see if this can be done.

Thanks for the thorough review Peter, I'll address this comment and
the others in a v3. :)

>
> Miaohe initially proposed this swapin error facility, let's see whether he
> can comment; he's already in the cc list.
>
> AFAICT that's the right thing to do, and it shouldn't affect any existing
> user of swapin error if there is.
>
> Or say, VM_FAULT_HWPOISON should be the same as VM_FAULT_SIGBUS when not in
> kvm context, so it shouldn't change a thing in !kvm, while changing that
> should stop kvm from crashing a guest where we shouldn't need to.
>
> --
> Peter Xu
>

2023-07-05 18:19:52

by Peter Xu

[permalink] [raw]
Subject: Re: [PATCH v2 4/6] selftests/mm: refactor uffd_poll_thread to allow custom fault handlers

On Wed, Jul 05, 2023 at 10:50:34AM -0700, Axel Rasmussen wrote:
> On Tue, Jul 4, 2023 at 2:03 PM Peter Xu <[email protected]> wrote:
> >
> > On Thu, Jun 29, 2023 at 01:50:38PM -0700, Axel Rasmussen wrote:
> > > Previously, we had "one fault handler to rule them all", which used
> > > several branches to deal with all of the scenarios required by all of
> > > the various tests.
> > >
> > > In upcoming patches, I plan to add a new test, which has its own
> > > slightly different fault handling logic. Instead of continuing to add
> > > cruft to the existing fault handler, let's allow tests to define custom
> > > ones, separate from other tests.
> > >
> > > Signed-off-by: Axel Rasmussen <[email protected]>
> > > ---
> > > tools/testing/selftests/mm/uffd-common.c | 5 ++++-
> > > tools/testing/selftests/mm/uffd-common.h | 3 +++
> > > tools/testing/selftests/mm/uffd-stress.c | 12 +++++++-----
> > > 3 files changed, 14 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
> > > index ba20d7504022..02b89860e193 100644
> > > --- a/tools/testing/selftests/mm/uffd-common.c
> > > +++ b/tools/testing/selftests/mm/uffd-common.c
> > > @@ -499,6 +499,9 @@ void *uffd_poll_thread(void *arg)
> > > int ret;
> > > char tmp_chr;
> > >
> > > + if (!args->handle_fault)
> > > + args->handle_fault = uffd_handle_page_fault;
> > > +
> > > pollfd[0].fd = uffd;
> > > pollfd[0].events = POLLIN;
> > > pollfd[1].fd = pipefd[cpu*2];
> > > @@ -527,7 +530,7 @@ void *uffd_poll_thread(void *arg)
> > > err("unexpected msg event %u\n", msg.event);
> > > break;
> > > case UFFD_EVENT_PAGEFAULT:
> > > - uffd_handle_page_fault(&msg, args);
> > > + args->handle_fault(&msg, args);
> > > break;
> > > case UFFD_EVENT_FORK:
> > > close(uffd);
> > > diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
> > > index 197f5262fe0d..7c4fa964c3b0 100644
> > > --- a/tools/testing/selftests/mm/uffd-common.h
> > > +++ b/tools/testing/selftests/mm/uffd-common.h
> > > @@ -77,6 +77,9 @@ struct uffd_args {
> > > unsigned long missing_faults;
> > > unsigned long wp_faults;
> > > unsigned long minor_faults;
> > > +
> > > + /* A custom fault handler; defaults to uffd_handle_page_fault. */
> > > + void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args);
> > > };
> > >
> > > struct uffd_test_ops {
> > > diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
> > > index 995ff13e74c7..50b1224d72c7 100644
> > > --- a/tools/testing/selftests/mm/uffd-stress.c
> > > +++ b/tools/testing/selftests/mm/uffd-stress.c
> > > @@ -189,10 +189,8 @@ static int stress(struct uffd_args *args)
> > > locking_thread, (void *)cpu))
> > > return 1;
> > > if (bounces & BOUNCE_POLL) {
> > > - if (pthread_create(&uffd_threads[cpu], &attr,
> > > - uffd_poll_thread,
> > > - (void *)&args[cpu]))
> > > - return 1;
> > > + if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
> > > + err("uffd_poll_thread create");
> >
> > irrelevant change?
>
> Right, I'll revert this. In an earlier version I had a more
> substantial change here, and just didn't fully revert it.
>
> >
> > > } else {
> > > if (pthread_create(&uffd_threads[cpu], &attr,
> > > uffd_read_thread,
> > > @@ -247,9 +245,13 @@ static int userfaultfd_stress(void)
> > > {
> > > void *area;
> > > unsigned long nr;
> > > - struct uffd_args args[nr_cpus];
> > > + struct uffd_args *args;
> > > uint64_t mem_size = nr_pages * page_size;
> > >
> > > + args = calloc(nr_cpus, sizeof(struct uffd_args));
> > > + if (!args)
> > > + err("allocating args array failed");
> > > +
> >
> > It's leaked?
> >
> > Isn't "args[] = { 0 }" already working?
>
> That works, but GCC can warn in this case (-Wmissing-braces) depending
> on the definition of struct uffd_args. I liked switching to calloc
> because it avoids any possibility of that even as we add/remove things
> to struct uffd_args in the future.

But afaict we also have other places using the static initialization; I hope
we can keep them consistent.

>
> Since it's a selftest and this function is only called exactly once,
> it didn't seem worth the code making certain we free it, instead just
> leaving it to be cleaned up when the process exits.

I'm also fine if you like to allocate it, but in that case please free it
even if used only once - we're already at it, no good reason to explicitly
leak it. Let's just assume if userfaultfd_stress() can succeed it can be
called 100000.. times without leaking anything.

Thanks,

--
Peter Xu

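To make that concrete, the shape being asked for in userfaultfd_stress() would
be roughly (sketch only):

    struct uffd_args *args;

    args = calloc(nr_cpus, sizeof(struct uffd_args));
    if (!args)
            err("allocating args array failed");

    /* ... run the bounce loop using args[cpu] ... */

    free(args);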

2023-07-05 18:33:58

by Jiaqi Yan

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Thu, Jun 29, 2023 at 1:50 PM Axel Rasmussen <[email protected]> wrote:
>
> The basic idea here is to "simulate" memory poisoning for VMs. A VM
> running on some host might encounter a memory error, after which some
> page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
> once poisoned, pages can never become "un-poisoned". So, when we live
> migrate the VM, we need to preserve the poisoned status of these pages.
>
> When live migrating, we try to get the guest running on its new host as
> quickly as possible. So, we start it running before all memory has been
> copied, and before we're certain which pages should be poisoned or not.
>
> So the basic way to use this new feature is:
>
> - On the new host, the guest's memory is registered with userfaultfd, in
> either MISSING or MINOR mode (doesn't really matter for this purpose).
> - On any first access, we get a userfaultfd event. At this point we can
> communicate with the old host to find out if the page was poisoned.
> - If so, we can respond with a UFFDIO_POISON - this places a swap marker
> so any future accesses will SIGBUS. Because the pte is now "present",
> future accesses won't generate more userfaultfd events, they'll just
> SIGBUS directly.
>
> UFFDIO_POISON does not handle unmapping previously-present PTEs. This

A minor suggestion: would UFFDIO_HWPOISON be better, so that readers
won't confuse it with CONFIG_PAGE_POISONING (a feature that fills
pages with poison patterns after free)?

> isn't needed, because during live migration we want to intercept
> all accesses with userfaultfd (not just writes, so WP mode isn't useful
> for this). So whether minor or missing mode is being used (or both), the
> PTE won't be present in any case, so handling that case isn't needed.
>
> Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
> these markers is encountered? For "normal" userspace programs there
> isn't a big difference, both yield a SIGBUS. The difference for KVM is
> key though: VM_FAULT_HWPOISON will result in an MCE being injected into
> the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
> hypervisor would need to catch the SIGBUS and deal with the MCE
> injection itself.
>
> Signed-off-by: Axel Rasmussen <[email protected]>
> ---
> fs/userfaultfd.c | 63 ++++++++++++++++++++++++++++++++
> include/linux/swapops.h | 3 +-
> include/linux/userfaultfd_k.h | 4 ++
> include/uapi/linux/userfaultfd.h | 25 +++++++++++--
> mm/memory.c | 4 ++
> mm/userfaultfd.c | 62 ++++++++++++++++++++++++++++++-
> 6 files changed, 156 insertions(+), 5 deletions(-)
>
> diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> index 7cecd49e078b..c26a883399c9 100644
> --- a/fs/userfaultfd.c
> +++ b/fs/userfaultfd.c
> @@ -1965,6 +1965,66 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
> return ret;
> }
>
> +static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
> +{
> + __s64 ret;
> + struct uffdio_poison uffdio_poison;
> + struct uffdio_poison __user *user_uffdio_poison;
> + struct userfaultfd_wake_range range;
> +
> + user_uffdio_poison = (struct uffdio_poison __user *)arg;
> +
> + ret = -EAGAIN;
> + if (atomic_read(&ctx->mmap_changing))
> + goto out;
> +
> + ret = -EFAULT;
> + if (copy_from_user(&uffdio_poison, user_uffdio_poison,
> + /* don't copy the output fields */
> + sizeof(uffdio_poison) - (sizeof(__s64))))
> + goto out;
> +
> + ret = validate_range(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len);
> + if (ret)
> + goto out;
> +
> + ret = -EINVAL;
> + /* double check for wraparound just in case. */
> + if (uffdio_poison.range.start + uffdio_poison.range.len <=
> + uffdio_poison.range.start) {
> + goto out;
> + }
> + if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
> + goto out;
> +
> + if (mmget_not_zero(ctx->mm)) {
> + ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
> + uffdio_poison.range.len,
> + &ctx->mmap_changing, 0);
> + mmput(ctx->mm);
> + } else {
> + return -ESRCH;
> + }
> +
> + if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
> + return -EFAULT;
> + if (ret < 0)
> + goto out;
> +
> + /* len == 0 would wake all */
> + BUG_ON(!ret);
> + range.len = ret;
> + if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
> + range.start = uffdio_poison.range.start;
> + wake_userfault(ctx, &range);
> + }
> + ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
> +
> +out:
> + return ret;
> +}
> +
> static inline unsigned int uffd_ctx_features(__u64 user_features)
> {
> /*
> @@ -2066,6 +2126,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
> case UFFDIO_CONTINUE:
> ret = userfaultfd_continue(ctx, arg);
> break;
> + case UFFDIO_POISON:
> + ret = userfaultfd_poison(ctx, arg);
> + break;
> }
> return ret;
> }
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 4c932cb45e0b..8259fee32421 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -394,7 +394,8 @@ typedef unsigned long pte_marker;
>
> #define PTE_MARKER_UFFD_WP BIT(0)
> #define PTE_MARKER_SWAPIN_ERROR BIT(1)
> -#define PTE_MARKER_MASK (BIT(2) - 1)
> +#define PTE_MARKER_UFFD_POISON BIT(2)
> +#define PTE_MARKER_MASK (BIT(3) - 1)
>
> static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
> {
> diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> index ac7b0c96d351..ac8c6854097c 100644
> --- a/include/linux/userfaultfd_k.h
> +++ b/include/linux/userfaultfd_k.h
> @@ -46,6 +46,7 @@ enum mfill_atomic_mode {
> MFILL_ATOMIC_COPY,
> MFILL_ATOMIC_ZEROPAGE,
> MFILL_ATOMIC_CONTINUE,
> + MFILL_ATOMIC_POISON,
> NR_MFILL_ATOMIC_MODES,
> };
>
> @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
> extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
> unsigned long len, atomic_t *mmap_changing,
> uffd_flags_t flags);
> +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags);
> extern int mwriteprotect_range(struct mm_struct *dst_mm,
> unsigned long start, unsigned long len,
> bool enable_wp, atomic_t *mmap_changing);
> diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
> index 66dd4cd277bd..62151706c5a3 100644
> --- a/include/uapi/linux/userfaultfd.h
> +++ b/include/uapi/linux/userfaultfd.h
> @@ -39,7 +39,8 @@
> UFFD_FEATURE_MINOR_SHMEM | \
> UFFD_FEATURE_EXACT_ADDRESS | \
> UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
> - UFFD_FEATURE_WP_UNPOPULATED)
> + UFFD_FEATURE_WP_UNPOPULATED | \
> + UFFD_FEATURE_POISON)
> #define UFFD_API_IOCTLS \
> ((__u64)1 << _UFFDIO_REGISTER | \
> (__u64)1 << _UFFDIO_UNREGISTER | \
> @@ -49,12 +50,14 @@
> (__u64)1 << _UFFDIO_COPY | \
> (__u64)1 << _UFFDIO_ZEROPAGE | \
> (__u64)1 << _UFFDIO_WRITEPROTECT | \
> - (__u64)1 << _UFFDIO_CONTINUE)
> + (__u64)1 << _UFFDIO_CONTINUE | \
> + (__u64)1 << _UFFDIO_POISON)
> #define UFFD_API_RANGE_IOCTLS_BASIC \
> ((__u64)1 << _UFFDIO_WAKE | \
> (__u64)1 << _UFFDIO_COPY | \
> + (__u64)1 << _UFFDIO_WRITEPROTECT | \
> (__u64)1 << _UFFDIO_CONTINUE | \
> - (__u64)1 << _UFFDIO_WRITEPROTECT)
> + (__u64)1 << _UFFDIO_POISON)
>
> /*
> * Valid ioctl command number range with this API is from 0x00 to
> @@ -71,6 +74,7 @@
> #define _UFFDIO_ZEROPAGE (0x04)
> #define _UFFDIO_WRITEPROTECT (0x06)
> #define _UFFDIO_CONTINUE (0x07)
> +#define _UFFDIO_POISON (0x08)
> #define _UFFDIO_API (0x3F)
>
> /* userfaultfd ioctl ids */
> @@ -91,6 +95,8 @@
> struct uffdio_writeprotect)
> #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
> struct uffdio_continue)
> +#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
> + struct uffdio_poison)
>
> /* read() structure */
> struct uffd_msg {
> @@ -225,6 +231,7 @@ struct uffdio_api {
> #define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
> #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
> #define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
> +#define UFFD_FEATURE_POISON (1<<14)
> __u64 features;
>
> __u64 ioctls;
> @@ -321,6 +328,18 @@ struct uffdio_continue {
> __s64 mapped;
> };
>
> +struct uffdio_poison {
> + struct uffdio_range range;
> +#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
> + __u64 mode;
> +
> + /*
> + * Fields below here are written by the ioctl and must be at the end:
> + * the copy_from_user will not read past here.
> + */
> + __s64 updated;
> +};
> +
> /*
> * Flags for the userfaultfd(2) system call itself.
> */
> diff --git a/mm/memory.c b/mm/memory.c
> index d8a9a770b1f1..7fbda39e060d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3692,6 +3692,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
> if (WARN_ON_ONCE(!marker))
> return VM_FAULT_SIGBUS;
>
> + /* Poison emulation explicitly requested for this PTE. */
> + if (marker & PTE_MARKER_UFFD_POISON)
> + return VM_FAULT_HWPOISON;
> +
> /* Higher priority than uffd-wp when data corrupted */
> if (marker & PTE_MARKER_SWAPIN_ERROR)
> return VM_FAULT_SIGBUS;
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index a2bf37ee276d..87b62ca1e09e 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -286,6 +286,51 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
> goto out;
> }
>
> +/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
> +static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
> + struct vm_area_struct *dst_vma,
> + unsigned long dst_addr,
> + uffd_flags_t flags)
> +{
> + int ret;
> + struct mm_struct *dst_mm = dst_vma->vm_mm;
> + pte_t _dst_pte, *dst_pte;
> + spinlock_t *ptl;
> +
> + _dst_pte = make_pte_marker(PTE_MARKER_UFFD_POISON);
> + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
> +
> + if (vma_is_shmem(dst_vma)) {
> + struct inode *inode;
> + pgoff_t offset, max_off;
> +
> + /* serialize against truncate with the page table lock */
> + inode = dst_vma->vm_file->f_inode;
> + offset = linear_page_index(dst_vma, dst_addr);
> + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
> + ret = -EFAULT;
> + if (unlikely(offset >= max_off))
> + goto out_unlock;
> + }
> +
> + ret = -EEXIST;
> + /*
> + * For now, we don't handle unmapping pages, so only support filling in
> + * none PTEs, or replacing PTE markers.
> + */
> + if (!pte_none_mostly(*dst_pte))
> + goto out_unlock;
> +
> + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
> +
> + /* No need to invalidate - it was non-present before */
> + update_mmu_cache(dst_vma, dst_addr, dst_pte);
> + ret = 0;
> +out_unlock:
> + pte_unmap_unlock(dst_pte, ptl);
> + return ret;
> +}
> +
> static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
> {
> pgd_t *pgd;
> @@ -336,8 +381,12 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> * supported by hugetlb. A PMD_SIZE huge pages may exist as used
> * by THP. Since we can not reliably insert a zero page, this
> * feature is not supported.
> + *
> + * PTE marker handling for hugetlb is a bit special, so for now
> + * UFFDIO_POISON is not supported.
> */
> - if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
> + if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE) ||
> + uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> mmap_read_unlock(dst_mm);
> return -EINVAL;
> }
> @@ -481,6 +530,9 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
> if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
> return mfill_atomic_pte_continue(dst_pmd, dst_vma,
> dst_addr, flags);
> + } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
> + return mfill_atomic_pte_poison(dst_pmd, dst_vma,
> + dst_addr, flags);
> }
>
> /*
> @@ -702,6 +754,14 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
> uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
> }
>
> +ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
> + unsigned long len, atomic_t *mmap_changing,
> + uffd_flags_t flags)
> +{
> + return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
> + uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
> +}
> +
> long uffd_wp_range(struct vm_area_struct *dst_vma,
> unsigned long start, unsigned long len, bool enable_wp)
> {
> --
> 2.41.0.255.g8b1d071c50-goog
>
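
To make the new UAPI concrete, here is a minimal userspace sketch of the
responder side of the flow described in the commit message: on a fault
event, the migration target asks the source whether the page was
poisoned and, if so, answers with UFFDIO_POISON. It only assumes the
definitions this patch adds to <linux/userfaultfd.h>; the uffd
descriptor, fault_addr (taken from a previously read uffd_msg),
page_size, and the helper page_poisoned_on_source() are illustrative
placeholders, not part of the patch.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <errno.h>

/* Hypothetical query to the migration source; not part of this patch. */
extern int page_poisoned_on_source(uint64_t gpa);

/*
 * Mark one guest page as poisoned so any future access raises SIGBUS
 * (or an MCE injection when the access comes through KVM).
 * Returns 0 on success.
 */
static int poison_faulting_page(int uffd, uint64_t fault_addr, uint64_t page_size)
{
        struct uffdio_poison args = {
                .range = {
                        .start = fault_addr & ~(page_size - 1),
                        .len   = page_size,
                },
                /* mode == 0: let the ioctl wake the faulting thread. */
                .mode = 0,
        };

        if (!page_poisoned_on_source(args.range.start))
                return 0;       /* resolve with UFFDIO_COPY/CONTINUE instead */

        do {
                if (ioctl(uffd, UFFDIO_POISON, &args) == 0)
                        return 0;
        } while (errno == EAGAIN);      /* address space changed under us; retry */

        return -1;
}

Non-poisoned pages would keep being resolved with UFFDIO_COPY or
UFFDIO_CONTINUE exactly as today; only poisoned ones take this path.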

2023-07-05 18:38:15

by Axel Rasmussen

[permalink] [raw]
Subject: Re: [PATCH v2 1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

On Wed, Jul 5, 2023 at 11:18 AM Jiaqi Yan <[email protected]> wrote:
>
> On Thu, Jun 29, 2023 at 1:50 PM Axel Rasmussen <[email protected]> wrote:
> >
> > The basic idea here is to "simulate" memory poisoning for VMs. A VM
> > running on some host might encounter a memory error, after which some
> > page(s) are poisoned (i.e., future accesses SIGBUS). They expect that
> > once poisoned, pages can never become "un-poisoned". So, when we live
> > migrate the VM, we need to preserve the poisoned status of these pages.
> >
> > When live migrating, we try to get the guest running on its new host as
> > quickly as possible. So, we start it running before all memory has been
> > copied, and before we're certain which pages should be poisoned or not.
> >
> > So the basic way to use this new feature is:
> >
> > - On the new host, the guest's memory is registered with userfaultfd, in
> > either MISSING or MINOR mode (doesn't really matter for this purpose).
> > - On any first access, we get a userfaultfd event. At this point we can
> > communicate with the old host to find out if the page was poisoned.
> > - If so, we can respond with a UFFDIO_POISON - this places a swap marker
> > so any future accesses will SIGBUS. Because the pte is now "present",
> > future accesses won't generate more userfaultfd events, they'll just
> > SIGBUS directly.
> >
> > UFFDIO_POISON does not handle unmapping previously-present PTEs. This
>
> A minor suggestion: would UFFDIO_HWPOISON be better, so that readers
> won't confuse it with CONFIG_PAGE_POISONING (a feature that fills
> pages with poison patterns after free)?

I was a bit wary of using "HWPOISON" because this is more like
"simulated" poisoning, not "real" hardware poisoning. :/

But maybe folks feel that distinction is less important than the one
you've raised here, Jiaqi. I don't have a strong opinion.

>
> > isn't needed, because during live migration we want to intercept
> > all accesses with userfaultfd (not just writes, so WP mode isn't useful
> > for this). So whether minor or missing mode is being used (or both), the
> > PTE won't be present in any case, so handling that case isn't needed.
> >
> > Why return VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS when one of
> > these markers is encountered? For "normal" userspace programs there
> > isn't a big difference, both yield a SIGBUS. The difference for KVM is
> > key though: VM_FAULT_HWPOISON will result in an MCE being injected into
> > the guest (which is the behavior we want). With VM_FAULT_SIGBUS, the
> > hypervisor would need to catch the SIGBUS and deal with the MCE
> > injection itself.
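
As a companion sketch for the setup side referenced in the quote above
(the range is registered in MISSING or MINOR mode, so no PTEs are
present and every first access traps), here is roughly what the
registration could look like. It assumes the UFFD_FEATURE_POISON bit
added by this patch; addr and len stand in for the guest memory range,
and MISSING mode is used for simplicity.

#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>

/*
 * Create a userfaultfd, negotiate UFFD_FEATURE_POISON, and register the
 * guest memory in MISSING mode so every first access generates an event.
 * Returns the uffd on success, -1 on failure.
 */
static int setup_uffd(void *addr, uint64_t len)
{
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0)
                return -1;

        struct uffdio_api api = {
                .api = UFFD_API,
                .features = UFFD_FEATURE_POISON,
        };
        if (ioctl(uffd, UFFDIO_API, &api) < 0 ||
            !(api.features & UFFD_FEATURE_POISON)) {
                close(uffd);    /* kernel without UFFDIO_POISON support */
                return -1;
        }

        struct uffdio_register reg = {
                .range = {
                        .start = (uint64_t)(uintptr_t)addr,
                        .len   = len,
                },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
                close(uffd);
                return -1;
        }

        return uffd;    /* hand this to the fault-handling thread */
}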