2018-09-02 02:22:38

by Fengguang Wu

Subject: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read

For virtual machines, the "accessed" bits are set in the guest page tables
and in the EPT/NPT. So for a qemu-kvm process, convert the HVA to a GFN and
then to a GPA, and do the EPT/NPT walks there. Thanks to the linear HVA->GPA
mapping within each memslot, the conversion can be done efficiently, outside
of the page table walk loops.

In this manner, we provide a uniform interface for both virtual machines and
normal processes.

The intended use scenario is per-task/VM working set tracking and migration,
which makes it convenient to apply policies at task/VMA and VM granularity.
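
As an illustration (not part of this patch), the per-memslot conversion is
simple arithmetic, mirroring the existing hva_to_gfn_memslot() helper in
include/linux/kvm_host.h:

static gfn_t example_hva_to_gfn(unsigned long hva,
				struct kvm_memory_slot *slot)
{
	/* Within one memslot, HVA -> GPA is linear, so HVA -> GFN is too. */
	return slot->base_gfn +
	       ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

ept_idle_walk_hva_range() below applies this once per memslot and then hands
the whole GFN range to the EPT/NPT walker.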

Signed-off-by: Peng DongX <[email protected]>
Signed-off-by: Fengguang Wu <[email protected]>
---
arch/x86/kvm/ept_idle.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/ept_idle.h | 24 ++++++++++
2 files changed, 142 insertions(+)
create mode 100644 arch/x86/kvm/ept_idle.c
create mode 100644 arch/x86/kvm/ept_idle.h

diff --git a/arch/x86/kvm/ept_idle.c b/arch/x86/kvm/ept_idle.c
new file mode 100644
index 000000000000..5b97dd01011b
--- /dev/null
+++ b/arch/x86/kvm/ept_idle.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/bitmap.h>
+
+#include "ept_idle.h"
+
+
+// mindless copy from kvm_handle_hva_range().
+// TODO: handle order and hole.
+static int ept_idle_walk_hva_range(struct ept_idle_ctrl *eic,
+				   unsigned long start,
+				   unsigned long end)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int ret = 0;
+
+	slots = kvm_memslots(eic->kvm);
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn_start, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+			      (memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+		 */
+		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+		ret = ept_idle_walk_gfn_range(eic, gfn_start, gfn_end);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static ssize_t ept_idle_read(struct file *file, char *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct task_struct *task = file->private_data;
+	struct ept_idle_ctrl *eic;
+	unsigned long hva_start = *ppos << BITMAP_BYTE2PVA_SHIFT;
+	unsigned long hva_end = hva_start + (count << BITMAP_BYTE2PVA_SHIFT);
+	int ret;
+
+	if (*ppos % IDLE_BITMAP_CHUNK_SIZE ||
+	    count % IDLE_BITMAP_CHUNK_SIZE)
+		return -EINVAL;
+
+	eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+	if (!eic)
+		return -EBUSY;
+
+	eic->buf = buf;
+	eic->buf_size = count;
+	eic->kvm = task_kvm(task);
+	if (!eic->kvm) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	ret = ept_idle_walk_hva_range(eic, hva_start, hva_end);
+	if (ret)
+		goto out_free;
+
+	ret = eic->bytes_copied;
+	*ppos += ret;
+out_free:
+	kfree(eic);
+
+	return ret;
+}
+
+static int ept_idle_open(struct inode *inode, struct file *file)
+{
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int ept_idle_release(struct inode *inode, struct file *file)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+extern struct file_operations proc_ept_idle_operations;
+
+static int ept_idle_entry(void)
+{
+	proc_ept_idle_operations.owner = THIS_MODULE;
+	proc_ept_idle_operations.read = ept_idle_read;
+	proc_ept_idle_operations.open = ept_idle_open;
+	proc_ept_idle_operations.release = ept_idle_release;
+
+	return 0;
+}
+
+static void ept_idle_exit(void)
+{
+	memset(&proc_ept_idle_operations, 0, sizeof(proc_ept_idle_operations));
+}
+
+MODULE_LICENSE("GPL");
+module_init(ept_idle_entry);
+module_exit(ept_idle_exit);
diff --git a/arch/x86/kvm/ept_idle.h b/arch/x86/kvm/ept_idle.h
new file mode 100644
index 000000000000..e0b9dcecf50b
--- /dev/null
+++ b/arch/x86/kvm/ept_idle.h
@@ -0,0 +1,24 @@
+#ifndef _EPT_IDLE_H
+#define _EPT_IDLE_H
+
+#define IDLE_BITMAP_CHUNK_SIZE sizeof(u64)
+#define IDLE_BITMAP_CHUNK_BITS (IDLE_BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
+
+#define BITMAP_BYTE2PVA_SHIFT (3 + PAGE_SHIFT)
+
+#define EPT_IDLE_KBUF_FULL 1
+#define EPT_IDLE_KBUF_BYTES 8000
+#define EPT_IDLE_KBUF_BITS (EPT_IDLE_KBUF_BYTES * 8)
+
+struct ept_idle_ctrl {
+	struct kvm *kvm;
+
+	u64 kbuf[EPT_IDLE_KBUF_BITS / IDLE_BITMAP_CHUNK_BITS];
+	int bits_read;
+
+	void __user *buf;
+	int buf_size;
+	int bytes_copied;
+};
+
+#endif
--
2.15.0
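
The offset<->HVA mapping implied by BITMAP_BYTE2PVA_SHIFT can be exercised
from userspace roughly as follows. This is an illustrative sketch only: the
procfs path is a placeholder for whatever entry an earlier patch in this
series creates, and 4KB pages are assumed.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PAGE_SHIFT		12			/* assumption: 4KB pages */
#define BITMAP_BYTE2PVA_SHIFT	(3 + PAGE_SHIFT)	/* one byte covers 8 pages */

int main(void)
{
	/* placeholder path; the real proc entry is created elsewhere in the series */
	int fd = open("/proc/1234/idle_pages", O_RDONLY);
	uint64_t buf[125];			/* 1000 bytes, a multiple of 8 */
	unsigned long hva = 0x7f0000000000UL;	/* example HVA backing guest memory */
	ssize_t n;

	if (fd < 0)
		return 1;
	/*
	 * ept_idle_read() maps the file offset to an HVA by shifting left by
	 * (3 + PAGE_SHIFT), so seek to hva >> (3 + PAGE_SHIFT).  Both the
	 * offset and the byte count must stay multiples of 8, or the read
	 * returns -EINVAL.
	 */
	n = pread(fd, buf, sizeof(buf), hva >> BITMAP_BYTE2PVA_SHIFT);
	if (n > 0)
		printf("read %zd bytes covering %zd pages\n", n, n * 8);
	close(fd);
	return 0;
}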





2018-09-04 07:58:34

by Nikita Leshenko

Subject: Re: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read

On 1 Sep 2018, at 13:28, Fengguang Wu <[email protected]> wrote:
> +static ssize_t ept_idle_read(struct file *file, char *buf,
> + size_t count, loff_t *ppos)
> +{
> + struct task_struct *task = file->private_data;
> + struct ept_idle_ctrl *eic;
> + unsigned long hva_start = *ppos << BITMAP_BYTE2PVA_SHIFT;
> + unsigned long hva_end = hva_start + (count << BITMAP_BYTE2PVA_SHIFT);
> + int ret;
> +
> + if (*ppos % IDLE_BITMAP_CHUNK_SIZE ||
> + count % IDLE_BITMAP_CHUNK_SIZE)
> + return -EINVAL;
> +
> + eic = kzalloc(sizeof(*eic), GFP_KERNEL);
> + if (!eic)
> + return -EBUSY;
> +
> + eic->buf = buf;
> + eic->buf_size = count;
> + eic->kvm = task_kvm(task);
> + if (!eic->kvm) {
> + ret = -EINVAL;
> + goto out_free;
> + }
I think you need to increment the refcount while using kvm,
otherwise kvm can be destroyed from another thread while you're
walking it.

-Nikita
> +
> + ret = ept_idle_walk_hva_range(eic, hva_start, hva_end);
> + if (ret)
> + goto out_free;
> +
> + ret = eic->bytes_copied;
> + *ppos += ret;
> +out_free:
> + kfree(eic);
> +
> + return ret;
> +}

2018-09-04 08:13:57

by Peng, DongX

Subject: RE: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read

kvm_get_kvm() kvm_put_kvm()
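
For example, a rough sketch of how they could be wired into ept_idle_read()
(illustrative only, not tested; error handling kept as in the posted patch):

	eic->kvm = task_kvm(task);
	if (!eic->kvm) {
		ret = -EINVAL;
		goto out_free;
	}
	/* Pin the VM so it cannot be destroyed during the walk. */
	kvm_get_kvm(eic->kvm);

	ret = ept_idle_walk_hva_range(eic, hva_start, hva_end);

	/* Drop the reference once the EPT/NPT walk is done. */
	kvm_put_kvm(eic->kvm);
	if (ret)
		goto out_free;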

-----Original Message-----
From: Nikita Leshenko [mailto:[email protected]]
Sent: Tuesday, September 4, 2018 3:57 PM
To: Wu, Fengguang <[email protected]>
Cc: Andrew Morton <[email protected]>; Linux Memory Management List <[email protected]>; Peng, DongX <[email protected]>; Liu, Jingqi <[email protected]>; Dong, Eddie <[email protected]>; Hansen, Dave <[email protected]>; Huang, Ying <[email protected]>; Brendan Gregg <[email protected]>; [email protected]; LKML <[email protected]>
Subject: Re: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read

On 1 Sep 2018, at 13:28, Fengguang Wu <[email protected]> wrote:
> +static ssize_t ept_idle_read(struct file *file, char *buf,
> + size_t count, loff_t *ppos)
> +{
> + struct task_struct *task = file->private_data;
> + struct ept_idle_ctrl *eic;
> + unsigned long hva_start = *ppos << BITMAP_BYTE2PVA_SHIFT;
> + unsigned long hva_end = hva_start + (count << BITMAP_BYTE2PVA_SHIFT);
> + int ret;
> +
> + if (*ppos % IDLE_BITMAP_CHUNK_SIZE ||
> + count % IDLE_BITMAP_CHUNK_SIZE)
> + return -EINVAL;
> +
> + eic = kzalloc(sizeof(*eic), GFP_KERNEL);
> + if (!eic)
> + return -EBUSY;
> +
> + eic->buf = buf;
> + eic->buf_size = count;
> + eic->kvm = task_kvm(task);
> + if (!eic->kvm) {
> + ret = -EINVAL;
> + goto out_free;
> + }
I think you need to increment the refcount while using kvm, otherwise kvm can be destroyed from another thread while you're walking it.

-Nikita
> +
> + ret = ept_idle_walk_hva_range(eic, hva_start, hva_end);
> + if (ret)
> + goto out_free;
> +
> + ret = eic->bytes_copied;
> + *ppos += ret;
> +out_free:
> + kfree(eic);
> +
> + return ret;
> +}

2018-09-04 08:16:36

by Fengguang Wu

Subject: Re: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read

Yeah, thanks! We are currently restructuring the related functions and will
add these calls once the walk-order and hole issues are sorted out.

Thanks,
Fengguang

On Tue, Sep 04, 2018 at 04:12:00PM +0800, Peng Dong wrote:
>kvm_get_kvm() kvm_put_kvm()
>
>-----Original Message-----
>From: Nikita Leshenko [mailto:[email protected]]
>Sent: Tuesday, September 4, 2018 3:57 PM
>To: Wu, Fengguang <[email protected]>
>Cc: Andrew Morton <[email protected]>; Linux Memory Management List <[email protected]>; Peng, DongX <[email protected]>; Liu, Jingqi <[email protected]>; Dong, Eddie <[email protected]>; Hansen, Dave <[email protected]>; Huang, Ying <[email protected]>; Brendan Gregg <[email protected]>; [email protected]; LKML <[email protected]>
>Subject: Re: [RFC][PATCH 3/5] [PATCH 3/5] kvm-ept-idle: HVA indexed EPT read
>
>On 1 Sep 2018, at 13:28, Fengguang Wu <[email protected]> wrote:
>> +static ssize_t ept_idle_read(struct file *file, char *buf,
>> + size_t count, loff_t *ppos)
>> +{
>> + struct task_struct *task = file->private_data;
>> + struct ept_idle_ctrl *eic;
>> + unsigned long hva_start = *ppos << BITMAP_BYTE2PVA_SHIFT;
>> + unsigned long hva_end = hva_start + (count << BITMAP_BYTE2PVA_SHIFT);
>> + int ret;
>> +
>> + if (*ppos % IDLE_BITMAP_CHUNK_SIZE ||
>> + count % IDLE_BITMAP_CHUNK_SIZE)
>> + return -EINVAL;
>> +
>> + eic = kzalloc(sizeof(*eic), GFP_KERNEL);
>> + if (!eic)
>> + return -EBUSY;
>> +
>> + eic->buf = buf;
>> + eic->buf_size = count;
>> + eic->kvm = task_kvm(task);
>> + if (!eic->kvm) {
>> + ret = -EINVAL;
>> + goto out_free;
>> + }
>I think you need to increment the refcount while using kvm, otherwise kvm can be destroyed from another thread while you're walking it.
>
>-Nikita
>> +
>> + ret = ept_idle_walk_hva_range(eic, hva_start, hva_end);
>> + if (ret)
>> + goto out_free;
>> +
>> + ret = eic->bytes_copied;
>> + *ppos += ret;
>> +out_free:
>> + kfree(eic);
>> +
>> + return ret;
>> +}
>