Register the private memslot with the fd-based memory backing store and
handle memfile notifier callbacks to zap the existing mappings.
Currently, registration happens at memslot creation time, and the
initial support does not include page migration or swap.
KVM_MEM_PRIVATE is not exposed by default; architecture code can turn
it on by implementing kvm_arch_private_mem_supported().
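
For illustration only (no architecture is wired up by this patch), an
architecture would opt in by overriding the weak helper along these
lines; the unconditional 'return true' is a placeholder, not real arch
code:

    bool kvm_arch_private_mem_supported(struct kvm *kvm)
    {
            /* e.g. only report true for VM types backed by private memory */
            return true;
    }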
A 'kvm' reference is added to the memslot structure since the
memfile_notifier callbacks only provide a memslot reference, while the
kvm pointer is needed to do the zapping. The zapping itself reuses code
from the existing mmu notifier handling.
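
Background, not part of the diff: bumping kvm->mmu_notifier_seq in the
handler relies on the existing fault-path retry pattern, roughly:

    /* simplified sketch of the existing KVM fault-path retry logic */
    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();
    /* ... translate the gfn to a pfn ... */
    KVM_MMU_LOCK(kvm);
    if (mmu_seq != kvm->mmu_notifier_seq)
            goto retry;     /* a concurrent zap ran; redo the fault */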
Co-developed-by: Yu Zhang <[email protected]>
Signed-off-by: Yu Zhang <[email protected]>
Signed-off-by: Chao Peng <[email protected]>
---
include/linux/kvm_host.h | 10 ++-
virt/kvm/kvm_main.c | 132 ++++++++++++++++++++++++++++++++++++---
2 files changed, 131 insertions(+), 11 deletions(-)
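
For reviewers, a rough userspace sketch of creating a private slot with
this series (the _ext struct comes from the uapi patch earlier in the
series; treat the exact field layout here as illustrative):

    struct kvm_userspace_memory_region_ext ext = {
            .region = {
                    .slot            = 0,
                    .flags           = KVM_MEM_PRIVATE,
                    .guest_phys_addr = 0,
                    .memory_size     = size,
                    .userspace_addr  = (__u64)shared_mem,
            },
            .private_fd     = memfd,   /* fd of the private backing store */
            .private_offset = 0,
    };
    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &ext);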
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b0a7910505ed..00efb4b96bc7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -246,7 +246,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || defined(CONFIG_MEMFILE_NOTIFIER)
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
@@ -577,6 +577,7 @@ struct kvm_memory_slot {
struct file *private_file;
loff_t private_offset;
struct memfile_notifier notifier;
+ struct kvm *kvm;
};
static inline bool kvm_slot_is_private(const struct kvm_memory_slot *slot)
@@ -769,9 +770,13 @@ struct kvm {
struct hlist_head irq_ack_notifier_list;
#endif
+#if (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)) ||\
+ defined(CONFIG_MEMFILE_NOTIFIER)
+ unsigned long mmu_notifier_seq;
+#endif
+
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
long mmu_notifier_count;
unsigned long mmu_notifier_range_start;
unsigned long mmu_notifier_range_end;
@@ -1438,6 +1443,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);
+bool kvm_arch_private_mem_supported(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index db9d39a2d3a6..f93ac7cdfb53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -843,6 +843,73 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+static void kvm_private_mem_notifier_handler(struct memfile_notifier *notifier,
+ pgoff_t start, pgoff_t end)
+{
+ int idx;
+ struct kvm_memory_slot *slot = container_of(notifier,
+ struct kvm_memory_slot,
+ notifier);
+ struct kvm_gfn_range gfn_range = {
+ .slot = slot,
+ .start = start - (slot->private_offset >> PAGE_SHIFT),
+ .end = end - (slot->private_offset >> PAGE_SHIFT),
+ .may_block = true,
+ };
+ struct kvm *kvm = slot->kvm;
+
+ gfn_range.start = slot->base_gfn + gfn_range.start;
+ gfn_range.end = slot->base_gfn + min((unsigned long)gfn_range.end, slot->npages);
+
+ if (WARN_ON_ONCE(gfn_range.start >= gfn_range.end))
+ return;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ KVM_MMU_LOCK(kvm);
+ if (kvm_unmap_gfn_range(kvm, &gfn_range))
+ kvm_flush_remote_tlbs(kvm);
+ kvm->mmu_notifier_seq++;
+ KVM_MMU_UNLOCK(kvm);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
+static struct memfile_notifier_ops kvm_private_mem_notifier_ops = {
+ .populate = kvm_private_mem_notifier_handler,
+ .invalidate = kvm_private_mem_notifier_handler,
+};
+
+#define KVM_MEMFILE_FLAGS MEMFILE_F_USER_INACCESSIBLE | \
+ MEMFILE_F_UNMOVABLE | \
+ MEMFILE_F_UNRECLAIMABLE
+
+static inline int kvm_private_mem_register(struct kvm_memory_slot *slot)
+{
+ slot->notifier.ops = &kvm_private_mem_notifier_ops;
+ return memfile_register_notifier(slot->private_file, KVM_MEMFILE_FLAGS,
+ &slot->notifier);
+}
+
+static inline void kvm_private_mem_unregister(struct kvm_memory_slot *slot)
+{
+ memfile_unregister_notifier(&slot->notifier);
+}
+
+#else /* !CONFIG_HAVE_KVM_PRIVATE_MEM */
+
+static inline int kvm_private_mem_register(struct kvm_memory_slot *slot)
+{
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+}
+
+static inline void kvm_private_mem_unregister(struct kvm_memory_slot *slot)
+{
+ WARN_ON_ONCE(1);
+}
+
+#endif /* CONFIG_HAVE_KVM_PRIVATE_MEM */
+
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
unsigned long state,
@@ -887,6 +954,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
+ if (slot->flags & KVM_MEM_PRIVATE) {
+ kvm_private_mem_unregister(slot);
+ fput(slot->private_file);
+ }
+
kvm_destroy_dirty_bitmap(slot);
kvm_arch_free_memslot(kvm, slot);
@@ -1437,10 +1509,21 @@ static void kvm_replace_memslot(struct kvm *kvm,
}
}
-static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
+bool __weak kvm_arch_private_mem_supported(struct kvm *kvm)
+{
+ return false;
+}
+
+static int check_memory_region_flags(struct kvm *kvm,
+ const struct kvm_user_mem_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+ if (kvm_arch_private_mem_supported(kvm))
+ valid_flags |= KVM_MEM_PRIVATE;
+#endif
+
#ifdef __KVM_HAVE_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
@@ -1516,6 +1599,12 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
{
int r;
+ if (change == KVM_MR_CREATE && new->flags & KVM_MEM_PRIVATE) {
+ r = kvm_private_mem_register(new);
+ if (r)
+ return r;
+ }
+
/*
* If dirty logging is disabled, nullify the bitmap; the old bitmap
* will be freed on "commit". If logging is enabled in both old and
@@ -1544,6 +1633,9 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
kvm_destroy_dirty_bitmap(new);
+ if (r && change == KVM_MR_CREATE && new->flags & KVM_MEM_PRIVATE)
+ kvm_private_mem_unregister(new);
+
return r;
}
@@ -1840,7 +1932,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
int as_id, id;
int r;
- r = check_memory_region_flags(mem);
+ r = check_memory_region_flags(kvm, mem);
if (r)
return r;
@@ -1859,6 +1951,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
+ if (mem->flags & KVM_MEM_PRIVATE &&
+ (mem->private_offset & (PAGE_SIZE - 1) ||
+ mem->private_offset > U64_MAX - mem->memory_size))
+ return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
@@ -1897,6 +1993,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
return -EINVAL;
} else { /* Modify an existing slot. */
+ /* Private memslots are immutable, they can only be deleted. */
+ if (mem->flags & KVM_MEM_PRIVATE)
+ return -EINVAL;
if ((mem->userspace_addr != old->userspace_addr) ||
(npages != old->npages) ||
((mem->flags ^ old->flags) & KVM_MEM_READONLY))
@@ -1925,10 +2024,27 @@ int __kvm_set_memory_region(struct kvm *kvm,
new->npages = npages;
new->flags = mem->flags;
new->userspace_addr = mem->userspace_addr;
+ if (mem->flags & KVM_MEM_PRIVATE) {
+ new->private_file = fget(mem->private_fd);
+ if (!new->private_file) {
+ r = -EINVAL;
+ goto out;
+ }
+ new->private_offset = mem->private_offset;
+ }
+
+ new->kvm = kvm;
r = kvm_set_memslot(kvm, old, new, change);
if (r)
- kfree(new);
+ goto out;
+
+ return 0;
+
+out:
+ if (new->private_file)
+ fput(new->private_file);
+ kfree(new);
return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
@@ -4512,12 +4628,10 @@ static long kvm_vm_ioctl(struct file *filp,
(u32 __user *)(argp + offsetof(typeof(mem), flags))))
goto out;
- if (flags & KVM_MEM_PRIVATE) {
- r = -EINVAL;
- goto out;
- }
-
- size = sizeof(struct kvm_userspace_memory_region);
+ if (flags & KVM_MEM_PRIVATE)
+ size = sizeof(struct kvm_userspace_memory_region_ext);
+ else
+ size = sizeof(struct kvm_userspace_memory_region);
if (copy_from_user(&mem, argp, size))
goto out;
--
2.25.1
On Thu, May 19, 2022 at 11:37:12PM +0800, Chao Peng wrote:
> Register the private memslot with the fd-based memory backing store and
> handle memfile notifier callbacks to zap the existing mappings.
> ...
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index db9d39a2d3a6..f93ac7cdfb53 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -843,6 +843,73 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
>
> #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
>
> +#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
> +static void kvm_private_mem_notifier_handler(struct memfile_notifier *notifier,
> + pgoff_t start, pgoff_t end)
> +{
> + int idx;
> + struct kvm_memory_slot *slot = container_of(notifier,
> + struct kvm_memory_slot,
> + notifier);
> + struct kvm_gfn_range gfn_range = {
> + .slot = slot,
> + .start = start - (slot->private_offset >> PAGE_SHIFT),
> + .end = end - (slot->private_offset >> PAGE_SHIFT),
This code assumes that 'end' is greater than slot->private_offset, but
even if slot->private_offset is non-zero, nothing stops userspace from
allocating pages in the range of 0 through slot->private_offset, which
will still end up triggering this notifier. In that case gfn_range.end
wraps around (pgoff_t is unsigned), and the code below clamps it to
slot->npages and does a populate/invalidate for the entire range.
Not sure if this covers all the cases, but this fixes the issue for me:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 903ffdb5f01c..4c744d8f7527 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -872,6 +872,19 @@ static void kvm_private_mem_notifier_handler(struct memfile_notifier *notifier,
.may_block = true,
};
struct kvm *kvm = slot->kvm;
+
+ if (slot->private_offset > end)
+ return;
+
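
To spell out the intersection logic this is aiming for (a sketch only,
not necessarily the code that will land):

    /* Clamp the file page range [start, end) to the slot's backing
     * range [off, off + npages) before converting to GFNs. */
    pgoff_t off = slot->private_offset >> PAGE_SHIFT;

    if (end <= off)
            return;                 /* entirely below this slot */
    start = max(start, off);
    if (start - off >= slot->npages)
            return;                 /* entirely above this slot */
    gfn_range.start = slot->base_gfn + (start - off);
    gfn_range.end = slot->base_gfn + min(end - off, (pgoff_t)slot->npages);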
On Thu, Jun 23, 2022 at 05:07:51PM -0500, Michael Roth wrote:
...
> > ...
> > + struct kvm_gfn_range gfn_range = {
> > + .slot = slot,
> > + .start = start - (slot->private_offset >> PAGE_SHIFT),
> > + .end = end - (slot->private_offset >> PAGE_SHIFT),
>
> This code assumes that 'end' is greater than slot->private_offset, but
> even if slot->private_offset is non-zero, nothing stops userspace from
> allocating pages in the range of 0 through slot->private_offset, which
> will still end up triggering this notifier. In that case gfn_range.end
> wraps around (pgoff_t is unsigned), and the code below clamps it to
> slot->npages and does a populate/invalidate for the entire range.
>
> Not sure if this covers all the cases, but this fixes the issue for me:
Right, already noticed this issue, will fix in next version. Thanks.