2021-09-29 02:40:53

by David Stevens

Subject: [PATCH v8 0/7] Fixes for dma-iommu swiotlb bounce buffers

From: David Stevens <[email protected]>

This patch set includes various fixes for dma-iommu's swiotlb bounce
buffers for untrusted devices.

The min_align_mask issue was found when running fio on an untrusted nvme
device with bs=512. The other issues were found via code inspection, so
I don't have any specific use cases where things were not working, nor
any concrete performance numbers.

There are two issues related to min_align_mask that this patch series
does not attempt to fix. First, it does not address the case where
min_align_mask is larger than the IOVA granule. Doing so requires
changes to IOVA allocation, and is not specific to when swiotlb bounce
buffers are used. This is not a problem in practice today, since the
only driver that uses min_align_mask is nvme, which sets it to enforce
a 4096-byte (NVME_CTRL_PAGE_SIZE) alignment.
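
For reference, here is a minimal sketch of how a driver ends up with
this constraint (illustrative only; the real call sits in the nvme pci
driver and may differ in detail):

#include <linux/dma-mapping.h>

/*
 * example_probe() is a placeholder, not a real driver entry point.  It
 * asks the DMA layer to preserve the low 12 bits of the original
 * buffer address in the DMA addresses it hands out.
 */
static int example_probe(struct device *dev)
{
	return dma_set_min_align_mask(dev, 4096 - 1);
}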

The second issue this series does not address is the fact that extra
swiotlb slots adjacent to a bounce buffer can be exposed to untrusted
devices whose drivers use min_align_mask. Fixing this requires being
able to allocate padding slots at the beginning of a swiotlb allocation.
This is a rather significant change that I am not comfortable making.
Without being able to handle this, there is also little point in
clearing the padding at the start of such a buffer, since we can only
clear based on (IO_TLB_SIZE - 1) rather than on iova_mask.
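
To make that limitation concrete, here is a rough sketch of the
arithmetic, assuming a 4K IOVA granule and the usual 2048-byte
IO_TLB_SIZE; the function and the example numbers are made up for
illustration:

#include <linux/swiotlb.h>

static void leading_padding_example(phys_addr_t tlb_addr,
				    unsigned long iova_mask)
{
	/* Padding we would want to clear, e.g. 0xa00 if tlb_addr ends in 0xa00. */
	size_t wanted = tlb_addr & iova_mask;
	/* Padding that actually lies inside this swiotlb slot, e.g. 0x200. */
	size_t in_slot = tlb_addr & (IO_TLB_SIZE - 1);

	/* The (wanted - in_slot) bytes below the slot belong to other allocations. */
	(void)wanted;
	(void)in_slot;
}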

v7 -> v8:
- Rebase on v5.15-rc3 and resolve conflicts with restricted dma

v6 -> v7:
- Remove unsafe attempt to clear padding at start of swiotlb buffer
- Rewrite commit message for min_align_mask commit to better explain
the problem it's fixing
- Rebase on iommu/core
- Acknowledge unsolved issues in cover letter

v5 -> v6:
- Remove unnecessary line break
- Remove redundant config check

v4 -> v5:
- Fix xen build error
- Move _swiotlb refactor into its own patch

v3 -> v4:
- Fold _swiotlb functions into _page functions
- Add patch to align swiotlb buffer to iovad granule
- Combine if checks in iommu_dma_sync_sg_* functions

v2 -> v3:
- Add new patch to address min_align_mask bug
- Set SKIP_CPU_SYNC flag after syncing in map/unmap
- Properly call arch_sync_dma_for_cpu in iommu_dma_sync_sg_for_cpu

v1 -> v2:
- Split fixes into dedicated patches
- Less invasive changes to fix arch_sync when mapping
- Leave dev_is_untrusted check for strict iommu

David Stevens (7):
dma-iommu: fix sync_sg with swiotlb
dma-iommu: fix arch_sync_dma for map
dma-iommu: skip extra sync during unmap w/swiotlb
dma-iommu: fold _swiotlb helpers into callers
dma-iommu: Check CONFIG_SWIOTLB more broadly
swiotlb: support aligned swiotlb buffers
dma-iommu: account for min_align_mask w/swiotlb

drivers/iommu/dma-iommu.c | 188 +++++++++++++++++---------------------
drivers/xen/swiotlb-xen.c | 2 +-
include/linux/swiotlb.h | 3 +-
kernel/dma/swiotlb.c | 13 ++-
4 files changed, 94 insertions(+), 112 deletions(-)

--
2.33.0.685.g46640cef36-goog


2021-09-29 02:41:04

by David Stevens

Subject: [PATCH v8 4/7] dma-iommu: fold _swiotlb helpers into callers

From: David Stevens <[email protected]>

Fold the _swiotlb helper functions into the respective _page functions,
since recent fixes have moved all logic from the _page functions to the
_swiotlb functions.

Signed-off-by: David Stevens <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
---
drivers/iommu/dma-iommu.c | 135 +++++++++++++++++---------------------
1 file changed, 59 insertions(+), 76 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 027b489714b7..4f77c44eaf14 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -510,26 +510,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- phys_addr_t phys;
-
- phys = iommu_iova_to_phys(domain, dma_addr);
- if (WARN_ON(!phys))
- return;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu(phys, size, dir);
-
- __iommu_dma_unmap(dev, dma_addr, size);
-
- if (unlikely(is_swiotlb_buffer(dev, phys)))
- swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
{
@@ -556,55 +536,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return iova + iova_off;
}

-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
- size_t org_size, dma_addr_t dma_mask, bool coherent,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int prot = dma_info_to_prot(dir, coherent, attrs);
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
- size_t aligned_size = org_size;
- void *padding_start;
- size_t padding_size;
- dma_addr_t iova;
-
- /*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
- */
- if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
- iova_offset(iovad, phys | org_size)) {
- aligned_size = iova_align(iovad, org_size);
- phys = swiotlb_tbl_map_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
- return DMA_MAPPING_ERROR;
-
- /* Cleanup the padding area. */
- padding_start = phys_to_virt(phys);
- padding_size = aligned_size;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL)) {
- padding_start += org_size;
- padding_size -= org_size;
- }
-
- memset(padding_start, 0, padding_size);
- }
-
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(phys, org_size, dir);
-
- iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
- swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
- return iova;
-}
-
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -866,15 +797,68 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t aligned_size = size;
+ dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+ iova_offset(iovad, phys | size)) {
+ void *padding_start;
+ size_t padding_size;
+
+ aligned_size = iova_align(iovad, size);
+ phys = swiotlb_tbl_map_single(dev, phys, size,
+ aligned_size, dir, attrs);
+
+ if (phys == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;

- return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
- coherent, dir, attrs);
+ /* Cleanup the padding area. */
+ padding_start = phys_to_virt(phys);
+ padding_size = aligned_size;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+ padding_start += size;
+ padding_size -= size;
+ }
+
+ memset(padding_start, 0, padding_size);
+ }
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+
+ iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+ return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ phys_addr_t phys;
+
+ phys = iommu_iova_to_phys(domain, dma_handle);
+ if (WARN_ON(!phys))
+ return;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(phys, size, dir);
+
+ __iommu_dma_unmap(dev, dma_handle, size);
+
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
@@ -959,7 +943,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
int i;

for_each_sg(sg, s, nents, i)
- __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+ iommu_dma_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}

@@ -970,9 +954,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int i;

for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
- s->length, dma_get_mask(dev),
- dev_is_dma_coherent(dev), dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+ s->offset, s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
--
2.33.0.685.g46640cef36-goog

2021-09-29 02:42:22

by David Stevens

Subject: [PATCH v8 1/7] dma-iommu: fix sync_sg with swiotlb

From: David Stevens <[email protected]>

The is_swiotlb_buffer function takes the physical address of the swiotlb
buffer, not the physical address of the original buffer. The sglist
contains the physical addresses of the original buffer, so for the
sync_sg functions to work properly when a bounce buffer might have been
used, we need to use iommu_iova_to_phys to look up the physical address.
This is what sync_single does, so call that function on each sglist
segment.

The previous code mostly worked because swiotlb does the transfer on map
and unmap. However, any caller that uses DMA_ATTR_SKIP_CPU_SYNC with
sglists, or that relies on the sync_sg functions, would not have had
anything copied to the bounce buffer.
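
As an illustration, here is a hypothetical caller pattern (the names
are placeholders, not taken from any real driver) that relies on the
sync_sg path reaching the bounce buffer:

#include <linux/dma-mapping.h>

static int example_submit(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	/* Map without syncing; the CPU fills the buffers afterwards. */
	if (!dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
			      DMA_ATTR_SKIP_CPU_SYNC))
		return -ENOMEM;

	/* ... CPU writes the payload into the original pages ... */

	/* This must copy the data into the swiotlb bounce buffer, if any. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}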

Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
Signed-off-by: David Stevens <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
---
drivers/iommu/dma-iommu.c | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 896bea04c347..c4d205b63c58 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -828,17 +828,13 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;

- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (!dev_is_dma_coherent(dev))
+ if (dev_is_untrusted(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
-
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
- sg->length, dir);
- }
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -848,17 +844,14 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;

- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_device(dev, sg_phys(sg),
- sg->length, dir);
-
- if (!dev_is_dma_coherent(dev))
+ if (dev_is_untrusted(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_device(dev,
+ sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
- }
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
--
2.33.0.685.g46640cef36-goog