From: Joerg Roedel <[email protected]>
A recent commit added a gfp parameter to amd_iommu_map() to make it
callable from atomic context, but forgot to pass it down to
iommu_map_page() and left GFP_KERNEL there. This caused
sleep-while-atomic warnings and needs to be fixed.
Reported-by: Qian Cai <[email protected]>
Reported-by: Dan Carpenter <[email protected]>
Fixes: 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map")
Signed-off-by: Joerg Roedel <[email protected]>
---
drivers/iommu/amd_iommu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0d2479546b77..fb54df5c2e11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2561,7 +2561,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
domain_flush_np_cache(domain, iova, page_size);
--
2.16.4
On Fri, Oct 18, 2019, Joerg Roedel wrote:
>From: Joerg Roedel <[email protected]>
>
>A recent commit added a gfp parameter to amd_iommu_map() to make it
>callable from atomic context, but forgot to pass it down to
>iommu_map_page() and left GFP_KERNEL there. This caused
>sleep-while-atomic warnings and needs to be fixed.
>
>Reported-by: Qian Cai <[email protected]>
>Reported-by: Dan Carpenter <[email protected]>
>Fixes: 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map")
>Signed-off-by: Joerg Roedel <[email protected]>
>---
> drivers/iommu/amd_iommu.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
>diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
>index 0d2479546b77..fb54df5c2e11 100644
>--- a/drivers/iommu/amd_iommu.c
>+++ b/drivers/iommu/amd_iommu.c
>@@ -2561,7 +2561,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
> if (iommu_prot & IOMMU_WRITE)
> prot |= IOMMU_PROT_IW;
>
>- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
>+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
>
> domain_flush_np_cache(domain, iova, page_size);
>
>--
>2.16.4
>
>_______________________________________________
>iommu mailing list
>[email protected]
>https://lists.linuxfoundation.org/mailman/listinfo/iommu
Reviewed-by: Jerry Snitselaar <[email protected]>
Did you get a chance to look at iommu_dma_alloc_remap() as well?
drivers/iommu/dma-iommu.c
584 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
585 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
^^^^^^^^^
586 {
587 struct iommu_domain *domain = iommu_get_dma_domain(dev);
588 struct iommu_dma_cookie *cookie = domain->iova_cookie;
589 struct iova_domain *iovad = &cookie->iovad;
590 bool coherent = dev_is_dma_coherent(dev);
591 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
592 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
593 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
594 struct page **pages;
595 struct sg_table sgt;
596 dma_addr_t iova;
597 void *vaddr;
598
599 *dma_handle = DMA_MAPPING_ERROR;
600
601 if (unlikely(iommu_dma_deferred_attach(dev, domain)))
602 return NULL;
603
604 min_size = alloc_sizes & -alloc_sizes;
605 if (min_size < PAGE_SIZE) {
606 min_size = PAGE_SIZE;
607 alloc_sizes |= PAGE_SIZE;
608 } else {
609 size = ALIGN(size, min_size);
610 }
611 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
612 alloc_sizes = min_size;
613
614 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
615 pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
616 gfp);
617 if (!pages)
618 return NULL;
619
620 size = iova_align(iovad, size);
621 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
622 if (!iova)
623 goto out_free_pages;
624
625 if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
^^^^^^^^^^
gfp here instead of GFP_KERNEL?
626 goto out_free_iova;
627
628 if (!(ioprot & IOMMU_CACHE)) {
regards,
dan carpenter
On 2019-10-18 10:27 am, Dan Carpenter wrote:
> Did you get a chance to look at iommu_dma_alloc_remap() as well?
>
> drivers/iommu/dma-iommu.c
> 584 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
> 585 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
> ^^^^^^^^^
> 586 {
> 587 struct iommu_domain *domain = iommu_get_dma_domain(dev);
> 588 struct iommu_dma_cookie *cookie = domain->iova_cookie;
> 589 struct iova_domain *iovad = &cookie->iovad;
> 590 bool coherent = dev_is_dma_coherent(dev);
> 591 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
> 592 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
> 593 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
> 594 struct page **pages;
> 595 struct sg_table sgt;
> 596 dma_addr_t iova;
> 597 void *vaddr;
> 598
> 599 *dma_handle = DMA_MAPPING_ERROR;
> 600
> 601 if (unlikely(iommu_dma_deferred_attach(dev, domain)))
> 602 return NULL;
> 603
> 604 min_size = alloc_sizes & -alloc_sizes;
> 605 if (min_size < PAGE_SIZE) {
> 606 min_size = PAGE_SIZE;
> 607 alloc_sizes |= PAGE_SIZE;
> 608 } else {
> 609 size = ALIGN(size, min_size);
> 610 }
> 611 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
> 612 alloc_sizes = min_size;
> 613
> 614 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> 615 pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
> 616 gfp);
> 617 if (!pages)
> 618 return NULL;
> 619
> 620 size = iova_align(iovad, size);
> 621 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
> 622 if (!iova)
> 623 goto out_free_pages;
> 624
> 625 if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
> ^^^^^^^^^^
> gfp here instead of GFP_KERNEL?
This is, from what I remember, intentional - it's a temporary allocation
which doesn't need to have the same restrictions as the actual buffer
being allocated (e.g. GFP_DMA32 etc.). We don't need to worry about
GFP_ATOMIC since the whole thing is only ever called in sleeping contexts.
Robin.
>
> 626 goto out_free_iova;
> 627
> 628 if (!(ioprot & IOMMU_CACHE)) {
>
> regards,
> dan carpenter
>
> _______________________________________________
> iommu mailing list
> [email protected]
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
>