Date: 2021-02-19 16:18:09
From: Daniel Jordan
Subject: [PATCH v2 2/3] vfio/type1: Prepare for batched pinning with struct vfio_batch

Get ready to pin more pages at once with struct vfio_batch, which
represents a batch of pinned pages.
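
For illustration, the intended lifecycle of a batch looks roughly like
this (a minimal sketch using the helpers added below; the surrounding
caller is hypothetical):

        struct vfio_batch batch;

        vfio_batch_init(&batch);        /* sets up batch->pages */
        /* ... hand batch.pages to vaddr_get_pfns() while pinning ... */
        vfio_batch_fini(&batch);        /* frees the array, if one was allocated */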

The struct has a fallback page pointer to avoid two unlikely scenarios:
pointlessly allocating a full page for the array when disable_hugepages
is enabled, and failing the whole pinning operation when the kernel
can't allocate the pages array.
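
As a concrete example (assuming 4 KiB pages and 8-byte page pointers,
as on x86_64), a full batch holds

        VFIO_BATCH_MAX_CAPACITY = PAGE_SIZE / sizeof(struct page *)
                                = 4096 / 8 = 512 pages,

so one page-sized allocation covers 512 pins at a time, while the
fallback degrades gracefully to a capacity of 1.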

vaddr_get_pfn() becomes vaddr_get_pfns() to prepare for handling
multiple pages, though for now only one page is stored in the pages
array.
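
Concretely, call sites that used to read

        ret = vaddr_get_pfn(mm, vaddr, prot, pfn);

now pass an explicit page count and a pages array, still requesting a
single page for the moment (argument names abbreviated from the hunks
below):

        ret = vaddr_get_pfns(mm, vaddr, 1, prot, pfn, batch->pages);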

Signed-off-by: Daniel Jordan <[email protected]>
---
drivers/vfio/vfio_iommu_type1.c | 71 +++++++++++++++++++++++++++------
1 file changed, 58 insertions(+), 13 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7abaaad518a6..b7247a2fc87e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -103,6 +103,12 @@ struct vfio_dma {
unsigned long *bitmap;
};

+struct vfio_batch {
+ struct page **pages; /* for pin_user_pages_remote */
+ struct page *fallback_page; /* if pages alloc fails */
+ int capacity; /* length of pages array */
+};
+
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
@@ -459,6 +465,31 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}

+#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
+
+static void vfio_batch_init(struct vfio_batch *batch)
+{
+ if (unlikely(disable_hugepages))
+ goto fallback;
+
+ batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!batch->pages)
+ goto fallback;
+
+ batch->capacity = VFIO_BATCH_MAX_CAPACITY;
+ return;
+
+fallback:
+ batch->pages = &batch->fallback_page;
+ batch->capacity = 1;
+}
+
+static void vfio_batch_fini(struct vfio_batch *batch)
+{
+ if (batch->capacity == VFIO_BATCH_MAX_CAPACITY)
+ free_page((unsigned long)batch->pages);
+}
+
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
bool write_fault)
@@ -489,10 +520,10 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
* Returns the positive number of pfns successfully obtained or a negative
* error code.
*/
-static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
- int prot, unsigned long *pfn)
+static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
+ long npages, int prot, unsigned long *pfn,
+ struct page **pages)
{
- struct page *page[1];
struct vm_area_struct *vma;
unsigned int flags = 0;
int ret;
@@ -501,10 +532,10 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
flags |= FOLL_WRITE;

mmap_read_lock(mm);
- ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
- page, NULL, NULL);
- if (ret == 1) {
- *pfn = page_to_pfn(page[0]);
+ ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
+ pages, NULL, NULL);
+ if (ret > 0) {
+ *pfn = page_to_pfn(pages[0]);
goto done;
}

@@ -592,7 +623,7 @@ static int vfio_wait_all_valid(struct vfio_iommu *iommu)
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base,
- unsigned long limit)
+ unsigned long limit, struct vfio_batch *batch)
{
unsigned long pfn = 0;
long ret, pinned = 0, lock_acct = 0;
@@ -603,7 +634,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
if (!current->mm)
return -ENODEV;

- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
+ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base,
+ batch->pages);
if (ret < 0)
return ret;

@@ -630,7 +662,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
/* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn,
+ batch->pages);
if (ret < 0)
break;

@@ -693,6 +726,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
+ struct page *pages[1];
struct mm_struct *mm;
int ret;

@@ -700,7 +734,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
if (!mm)
return -ENODEV;

- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+ ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
ret = vfio_lock_acct(dma, 1, true);
if (ret) {
@@ -1394,15 +1428,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
{
dma_addr_t iova = dma->iova;
unsigned long vaddr = dma->vaddr;
+ struct vfio_batch batch;
size_t size = map_size;
long npage;
unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret = 0;

+ vfio_batch_init(&batch);
+
while (size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
- size >> PAGE_SHIFT, &pfn, limit);
+ size >> PAGE_SHIFT, &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1422,6 +1460,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->size += npage << PAGE_SHIFT;
}

+ vfio_batch_fini(&batch);
dma->iommu_mapped = true;

if (ret)
@@ -1598,6 +1637,7 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
+ struct vfio_batch batch;
struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -1612,6 +1652,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
d = list_first_entry(&iommu->domain_list,
struct vfio_domain, next);

+ vfio_batch_init(&batch);
+
n = rb_first(&iommu->dma_list);

for (; n; n = rb_next(n)) {
@@ -1659,7 +1701,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,

npage = vfio_pin_pages_remote(dma, vaddr,
n >> PAGE_SHIFT,
- &pfn, limit);
+ &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1692,6 +1735,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
dma->iommu_mapped = true;
}

+ vfio_batch_fini(&batch);
return 0;

unwind:
@@ -1732,6 +1776,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
}

+ vfio_batch_fini(&batch);
return ret;
}

--
2.30.1