Hi Arnd,
Any update on this one?
Thanks,
Michal
On 11/14/2014 12:39 PM, Michal Simek wrote:
> Check that dma_ops is actually set before it is dereferenced.
>
> Signed-off-by: Michal Simek <[email protected]>
> ---
>
> The functions dma_mmap_attrs() and dma_get_sgtable_attrs()
> already have this check, as shown in the sketch below.
>
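> For reference, the existing check in dma_mmap_attrs() looks roughly like
> this (illustrative sketch of the generic helper, not a verbatim copy):
>
> static inline int
> dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
> 	       void *cpu_addr, dma_addr_t dma_addr, size_t size,
> 	       struct dma_attrs *attrs)
> {
> 	struct dma_map_ops *ops = get_dma_ops(dev);
>
> 	/*
> 	 * Fail loudly here instead of crashing on a NULL dereference
> 	 * below when an architecture has not set up dma_map_ops.
> 	 */
> 	BUG_ON(!ops);
> 	if (ops->mmap)
> 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
> 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
> }
>
> This patch extends the same BUG_ON(!ops) guard to the remaining
> map/unmap/sync helpers so that a missing dma_ops pointer triggers an
> immediate, clear BUG() rather than a NULL pointer dereference.
>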
> The whole discussion was originally here:
> https://lkml.org/lkml/2013/6/3/255
>
> Right now we have a halfway solution where some functions have this check
> and some do not. Based on the get_maintainer.pl script, Arnd should make
> the decision to accept or reject this patch.
>
> ---
> include/asm-generic/dma-mapping-common.h | 12 ++++++++++++
> 1 file changed, 12 insertions(+)
>
> diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
> index de8bf89940f8..d430cabd2975 100644
> --- a/include/asm-generic/dma-mapping-common.h
> +++ b/include/asm-generic/dma-mapping-common.h
> @@ -16,6 +16,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
> dma_addr_t addr;
>
> kmemcheck_mark_initialized(ptr, size);
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> addr = ops->map_page(dev, virt_to_page(ptr),
> (unsigned long)ptr & ~PAGE_MASK, size,
> @@ -33,6 +34,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->unmap_page)
> ops->unmap_page(dev, addr, size, dir, attrs);
> @@ -49,6 +51,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
>
> for_each_sg(sg, s, nents, i)
> kmemcheck_mark_initialized(sg_virt(s), s->length);
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> ents = ops->map_sg(dev, sg, nents, dir, attrs);
> debug_dma_map_sg(dev, sg, nents, ents, dir);
> @@ -62,6 +65,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> debug_dma_unmap_sg(dev, sg, nents, dir);
> if (ops->unmap_sg)
> @@ -76,6 +80,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
> dma_addr_t addr;
>
> kmemcheck_mark_initialized(page_address(page) + offset, size);
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> addr = ops->map_page(dev, page, offset, size, dir, NULL);
> debug_dma_map_page(dev, page, offset, size, dir, addr, false);
> @@ -88,6 +93,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->unmap_page)
> ops->unmap_page(dev, addr, size, dir, NULL);
> @@ -100,6 +106,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_single_for_cpu)
> ops->sync_single_for_cpu(dev, addr, size, dir);
> @@ -112,6 +119,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_single_for_device)
> ops->sync_single_for_device(dev, addr, size, dir);
> @@ -126,6 +134,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_single_for_cpu)
> ops->sync_single_for_cpu(dev, addr + offset, size, dir);
> @@ -140,6 +149,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_single_for_device)
> ops->sync_single_for_device(dev, addr + offset, size, dir);
> @@ -152,6 +162,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_sg_for_cpu)
> ops->sync_sg_for_cpu(dev, sg, nelems, dir);
> @@ -164,6 +175,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
> {
> struct dma_map_ops *ops = get_dma_ops(dev);
>
> + BUG_ON(!ops);
> BUG_ON(!valid_dma_direction(dir));
> if (ops->sync_sg_for_device)
> ops->sync_sg_for_device(dev, sg, nelems, dir);
> --
> 1.8.2.3
>
--
Michal Simek, Ing. (M.Eng), OpenPGP -> KeyID: FE3D1F91
w: http://www.monstr.eu p: +42-0-721842854
Maintainer of Linux kernel - Microblaze cpu - http://www.monstr.eu/fdt/
Maintainer of Linux kernel - Xilinx Zynq ARM architecture
Microblaze U-BOOT custodian and responsible for u-boot arm zynq platform