Hi,
here is the second version of my patch-set to fix a DMA
mapping size issue triggered by the virtio-blk driver.
The problem is a limitation of the SWIOTLB implementation,
which does not support allocations larger than 256 KB. When
the virtio-blk driver tries to read/write a block larger
than that, the allocation of the dma-handle fails and an IO
error is reported.
v1 of the patch-set can be found here:
https://lore.kernel.org/lkml/[email protected]/
The change from v1 is that the maximum mapping size is now
officially propagated through the DMA-API, as suggested by
Christoph Hellwig.
Please review.
Thanks,
Joerg
Joerg Roedel (3):
swiotlb: Introduce swiotlb_max_mapping_size()
dma: Introduce dma_max_mapping_size()
virtio-blk: Consider dma_max_mapping_size() for maximum segment size
drivers/block/virtio_blk.c | 10 ++++++----
include/linux/dma-mapping.h | 16 ++++++++++++++++
include/linux/swiotlb.h | 5 +++++
kernel/dma/direct.c | 10 ++++++++++
kernel/dma/swiotlb.c | 5 +++++
5 files changed, 42 insertions(+), 4 deletions(-)
--
2.17.1
From: Joerg Roedel <[email protected]>
The function returns the maximum size that can be mapped
using DMA-API functions. The patch also adds the
implementation for direct DMA and a new dma_map_ops pointer
so that other implementations can expose their limit.
Signed-off-by: Joerg Roedel <[email protected]>
---
include/linux/dma-mapping.h | 16 ++++++++++++++++
kernel/dma/direct.c | 10 ++++++++++
2 files changed, 26 insertions(+)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f6ded992c183..a3ca8a71a704 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,6 +130,7 @@ struct dma_map_ops {
enum dma_data_direction direction);
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
@@ -257,6 +258,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
}
#endif
+size_t dma_direct_max_mapping_size(struct device *dev);
+
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
@@ -440,6 +443,19 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ size_t size = SIZE_MAX;
+
+ if (dma_is_direct(ops))
+ size = dma_direct_max_mapping_size(dev);
+ else if (ops && ops->max_mapping_size)
+ size = ops->max_mapping_size(dev);
+
+ return size;
+}
+
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 355d16acee6d..84917e1003c4 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -380,3 +380,13 @@ int dma_direct_supported(struct device *dev, u64 mask)
*/
return mask >= __phys_to_dma(dev, min_mask);
}
+
+size_t dma_direct_max_mapping_size(struct device *dev)
+{
+ /*
+ * Return the minimum of the direct DMA limit and the SWIOTLB limit.
+ * Since direct DMA has no limit, it is fine to just return the SWIOTLB
+ * limit.
+ */
+ return swiotlb_max_mapping_size(dev);
+}
--
2.17.1
> +size_t dma_direct_max_mapping_size(struct device *dev)
> +{
> + /*
> + * Return the minimum of the direct DMA limit and the SWIOTLB limit.
> + * Since direct DMA has no limit, it is fine to just return the SWIOTLB
> + * limit.
> + */
> + return swiotlb_max_mapping_size(dev);
Well, if we don't actually use the swiotlb buffers despite it being
compiled in or even allocated we don't need the limit.
From: Joerg Roedel <[email protected]>
The function returns the maximum size that can be remapped
by the SWIOTLB implementation. This function will be later
exposed to users through the DMA-API.
Signed-off-by: Joerg Roedel <[email protected]>
---
include/linux/swiotlb.h | 5 +++++
kernel/dma/swiotlb.c | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7c007ed7505f..ceb623321f38 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -62,6 +62,7 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern size_t swiotlb_max_mapping_size(struct device *dev);
#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;
@@ -95,6 +96,10 @@ static inline unsigned int swiotlb_max_segment(void)
{
return 0;
}
+static inline size_t swiotlb_max_mapping_size(struct device *dev)
+{
+ return SIZE_MAX;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d6361776dc5c..c950b3e9f683 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -660,3 +660,8 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
+
+size_t swiotlb_max_mapping_size(struct device *dev)
+{
+ return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+}
--
2.17.1
From: Joerg Roedel <[email protected]>
Segments can't be larger than the maximum DMA mapping size
supported on the platform. Take that into account when
setting the maximum segment size for a block device.
Signed-off-by: Joerg Roedel <[email protected]>
---
drivers/block/virtio_blk.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b16a887bbd02..6193962a7fec 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -723,7 +723,7 @@ static int virtblk_probe(struct virtio_device *vdev)
struct request_queue *q;
int err, index;
- u32 v, blk_size, sg_elems, opt_io_size;
+ u32 v, blk_size, max_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
@@ -826,14 +826,16 @@ static int virtblk_probe(struct virtio_device *vdev)
/* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U);
+ max_size = dma_max_mapping_size(&vdev->dev);
+
/* Host can optionally specify maximum segment size and number of
* segments. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
struct virtio_blk_config, size_max, &v);
if (!err)
- blk_queue_max_segment_size(q, v);
- else
- blk_queue_max_segment_size(q, -1U);
+ max_size = min(max_size, v);
+
+ blk_queue_max_segment_size(q, max_size);
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
--
2.17.1
On Tue, Jan 15, 2019 at 02:37:54PM +0100, Christoph Hellwig wrote:
> > +size_t dma_direct_max_mapping_size(struct device *dev)
> > +{
> > + /*
> > + * Return the minimum of the direct DMA limit and the SWIOTLB limit.
> > + * Since direct DMA has no limit, it is fine to just return the SWIOTLB
> > + * limit.
> > + */
> > + return swiotlb_max_mapping_size(dev);
>
> Well, if we don't actually use the swiotlb buffers despite it being
> compiled in or even allocated we don't need the limit.
Right, I thought about that too, but didn't find a generic way to check
for all the cases. There are various checks that could be done:
1) Check if SWIOTLB is initialized at all, if not, return
SIZE_MAX as the limit. This can't be checked from dma-direct
code right now, but could be easily implemented.
2) Check for swiotlb=force needs to be done.
3) Check whether the device can access all of available RAM. I
have no idea how to check that in an architecture independent
way. It also has to take memory hotplug into account as well
as the DMA mask of the device.
An easy approximation could be to omit the limit if the
dma-mask covers all of the physical address bits available
on the platform. It would require to pass the dma-mask as an
additional parameter like it is done in dma_supported().
Any better ideas for how to implement 3)?
Regards,
Joerg
On Tue, Jan 15, 2019 at 05:23:22PM +0100, Joerg Roedel wrote:
> Right, I thought about that too, but didn't find a generic way to check
> for all the cases. There are various checks that could be done:
>
> 1) Check if SWIOTLB is initialized at all, if not, return
> SIZE_MAX as the limit. This can't be checked from dma-direct
> code right now, but could be easily implemented.
Yes, this is the low hanging fruit.
> 2) Check for swiotlb=force needs to be done.
>
> 3) Check whether the device can access all of available RAM. I
> have no idea how to check that in an architecture independent
> way. It also has to take memory hotplug into account as well
> as the DMA mask of the device.
>
> An easy approximation could be to omit the limit if the
> dma-mask covers all of the physical address bits available
> on the platform. It would require to pass the dma-mask as an
> additional parameter like it is done in dma_supported().
>
> Any better ideas for how to implement 3)?
And yeah, this is hard. So I'd just go for the low hanging fruit
for now and only implement 1) with a comment mentioning that
we are a little pessimistic.
On Tue, Jan 15, 2019 at 02:22:57PM +0100, Joerg Roedel wrote:
> From: Joerg Roedel <[email protected]>
>
> Segments can't be larger than the maximum DMA mapping size
> supported on the platform. Take that into account when
> setting the maximum segment size for a block device.
>
> Signed-off-by: Joerg Roedel <[email protected]>
> ---
> drivers/block/virtio_blk.c | 10 ++++++----
> 1 file changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index b16a887bbd02..6193962a7fec 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -723,7 +723,7 @@ static int virtblk_probe(struct virtio_device *vdev)
> struct request_queue *q;
> int err, index;
>
> - u32 v, blk_size, sg_elems, opt_io_size;
> + u32 v, blk_size, max_size, sg_elems, opt_io_size;
> u16 min_io_size;
> u8 physical_block_exp, alignment_offset;
>
> @@ -826,14 +826,16 @@ static int virtblk_probe(struct virtio_device *vdev)
> /* No real sector limit. */
> blk_queue_max_hw_sectors(q, -1U);
>
> + max_size = dma_max_mapping_size(&vdev->dev);
> +
Should this be limited to ACCESS_PLATFORM?
I see no reason to limit this without as guest can
access any memory.
I'd like a bit of time to consider this point.
> /* Host can optionally specify maximum segment size and number of
> * segments. */
> err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
> struct virtio_blk_config, size_max, &v);
> if (!err)
> - blk_queue_max_segment_size(q, v);
> - else
> - blk_queue_max_segment_size(q, -1U);
> + max_size = min(max_size, v);
> +
> + blk_queue_max_segment_size(q, max_size);
>
> /* Host can optionally specify the block size of the device */
> err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
> --
> 2.17.1
On Wed, Jan 16, 2019 at 09:05:40AM -0500, Michael S. Tsirkin wrote:
> On Tue, Jan 15, 2019 at 02:22:57PM +0100, Joerg Roedel wrote:
> > + max_size = dma_max_mapping_size(&vdev->dev);
> > +
>
>
> Should this be limited to ACCESS_PLATFORM?
>
> I see no reason to limit this without as guest can
> access any memory.
Actually, yes. This should be inside a use_dma_api check. I had it in
v1, but it went away without the vring layer for propagating the limit.
I'll add that again.
Thanks,
Joerg