On architectures where dma_addr_t is wider than phys_addr_t, the
swiotlb_tbl_map_single() function truncates its return value in the
failure path, making it impossible to identify the error later,
because callers compare against the original, untruncated constant:
kernel/dma/swiotlb.c:551:9: error: implicit conversion from 'dma_addr_t' (aka 'unsigned long long') to 'phys_addr_t' (aka 'unsigned int') changes value from 18446744073709551615 to 4294967295 [-Werror,-Wconstant-conversion]
return DMA_MAPPING_ERROR;
Use an explicit cast here to convert the constant to the narrower type,
and use the same expression in the error checks of the callers.
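The effect can be reproduced outside the kernel with plain integer
types. The following standalone sketch uses uint32_t and uint64_t as
stand-ins for a 32-bit phys_addr_t and a 64-bit dma_addr_t; the *_demo
names are illustrative, not kernel definitions:

/*
 * Minimal sketch of the truncation, assuming a 32-bit phys_addr_t and
 * a 64-bit dma_addr_t.  The kernel code converts implicitly (which is
 * what clang's -Wconstant-conversion flags); the cast here just keeps
 * the demo warning-free.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_demo_t;	/* stand-in for 32-bit phys_addr_t */
typedef uint64_t dma_addr_demo_t;	/* stand-in for 64-bit dma_addr_t  */

#define DMA_MAPPING_ERROR_DEMO ((dma_addr_demo_t)~0ULL)

/* Mimics the failure path: the 64-bit constant is narrowed to 32 bits. */
static phys_addr_demo_t map_single_fails(void)
{
	return (phys_addr_demo_t)DMA_MAPPING_ERROR_DEMO;	/* 0xffffffff */
}

int main(void)
{
	phys_addr_demo_t map = map_single_fails();

	/*
	 * The 32-bit value is promoted to 64 bits for the comparison, so
	 * 0x00000000ffffffff is compared against 0xffffffffffffffff and the
	 * error is never detected (prints 0).
	 */
	printf("untruncated compare matches: %d\n",
	       map == DMA_MAPPING_ERROR_DEMO);

	/*
	 * Casting the constant to the narrower type, as this patch does,
	 * makes both sides 32 bits and the error is detected again (prints 1).
	 */
	printf("casted compare matches:      %d\n",
	       map == (phys_addr_demo_t)DMA_MAPPING_ERROR_DEMO);
	return 0;
}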
Fixes: b907e20508d0 ("swiotlb: remove SWIOTLB_MAP_ERROR")
Signed-off-by: Arnd Bergmann <[email protected]>
---
I still think that reverting the original commit would have
provided clearer semantics for this corner case, but at least
this patch restores the correct behavior.
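For comparison, the alternative alluded to above -- keeping a separate
error constant already sized to phys_addr_t, roughly as before the
removal of SWIOTLB_MAP_ERROR -- needs no cast at the call sites. A
rough sketch with the same stand-in types (name and value illustrative,
not the old kernel macro):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_demo_t;	/* stand-in for 32-bit phys_addr_t */

/* A dedicated error constant of the narrower type: no truncation and
 * no cast at the call site.  Illustrative only. */
#define SWIOTLB_MAP_ERROR_DEMO ((phys_addr_demo_t)~0)

int main(void)
{
	phys_addr_demo_t map = SWIOTLB_MAP_ERROR_DEMO;	/* a failed mapping */

	/* Both sides are 32 bits, so the comparison just works (prints 1). */
	printf("error detected: %d\n", map == SWIOTLB_MAP_ERROR_DEMO);
	return 0;
}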
---
drivers/xen/swiotlb-xen.c | 2 +-
kernel/dma/swiotlb.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index d53f3493a6b9..cfbe46785a3b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -402,7 +402,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs);
- if (map == DMA_MAPPING_ERROR)
+ if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index e906ef2e6315..a3be651973ad 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -548,7 +548,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
size, io_tlb_nslabs, tmp_io_tlb_used);
- return DMA_MAPPING_ERROR;
+ return (phys_addr_t)DMA_MAPPING_ERROR;
found:
io_tlb_used += nslots;
spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -666,7 +666,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
/* Oh well, have to allocate and map a bounce buffer. */
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
*phys, size, dir, attrs);
- if (*phys == DMA_MAPPING_ERROR)
+ if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
return false;
/* Ensure that the address returned is DMA'ble */
--
2.20.0
On Mon, 17 Jun 2019, Arnd Bergmann wrote:
> On architectures where dma_addr_t is wider than phys_addr_t, the
> swiotlb_tbl_map_single() function truncates its return value in the
> failure path, making it impossible to identify the error later,
> because callers compare against the original, untruncated constant:
>
> kernel/dma/swiotlb.c:551:9: error: implicit conversion from 'dma_addr_t' (aka 'unsigned long long') to 'phys_addr_t' (aka 'unsigned int') changes value from 18446744073709551615 to 4294967295 [-Werror,-Wconstant-conversion]
> return DMA_MAPPING_ERROR;
>
> Use an explicit cast here to convert the constant to the narrower type,
> and use the same expression in the error checks of the callers.
>
> Fixes: b907e20508d0 ("swiotlb: remove SWIOTLB_MAP_ERROR")
> Signed-off-by: Arnd Bergmann <[email protected]>
Acked-by: Stefano Stabellini <[email protected]>
On Mon, Jun 17, 2019 at 09:13:16AM -0700, Stefano Stabellini wrote:
> Acked-by: Stefano Stabellini <[email protected]>
queued.