iommu_map_page calls into __domain_flush_pages, which requires the
domain lock since it traverses the device list, which the lock protects.
Signed-off-by: Filippo Sironi <[email protected]>
---
drivers/iommu/amd_iommu.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d4f25767622e..3714ae5ded31 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2562,6 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long address;
u64 dma_mask;
int ret;
+ unsigned long flags;
domain = get_domain(dev);
if (IS_ERR(domain))
@@ -2587,7 +2588,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+ spin_lock_irqsave(&domain->lock, flags);
ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ spin_unlock_irqrestore(&domain->lock, flags);
if (ret)
goto out_unmap;
@@ -3095,7 +3098,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IW;
mutex_lock(&domain->api_lock);
+ spin_lock(&domain->lock);
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
+ spin_unlock(&domain->lock);
mutex_unlock(&domain->api_lock);
domain_flush_np_cache(domain, iova, page_size);
--
2.7.4
> On 10. Sep 2019, at 19:49, Filippo Sironi <[email protected]> wrote:
>
> iommu_map_page calls into __domain_flush_pages, which requires the
> domain lock since it traverses the device list, which the lock protects.
>
> Signed-off-by: Filippo Sironi <[email protected]>
> ---
> drivers/iommu/amd_iommu.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
> index d4f25767622e..3714ae5ded31 100644
> --- a/drivers/iommu/amd_iommu.c
> +++ b/drivers/iommu/amd_iommu.c
> @@ -2562,6 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
> unsigned long address;
> u64 dma_mask;
> int ret;
> + unsigned long flags;
>
> domain = get_domain(dev);
> if (IS_ERR(domain))
> @@ -2587,7 +2588,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
>
> bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
> phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
> + spin_lock_irqsave(&domain->lock, flags);
> ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
> + spin_unlock_irqrestore(&domain->lock, flags);
> if (ret)
> goto out_unmap;
>
> @@ -3095,7 +3098,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
> prot |= IOMMU_PROT_IW;
>
> mutex_lock(&domain->api_lock);
> + spin_lock(&domain->lock);
> ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
> + spin_unlock(&domain->lock);
> mutex_unlock(&domain->api_lock);
The plain spin_lock/spin_unlock in the amd_iommu_map() hunk aren't the
correct choice: domain->lock is also taken with interrupts disabled in
the map_sg() path (spin_lock_irqsave), so this site must likewise use
spin_lock_irqsave and spin_unlock_irqrestore to avoid a deadlock if an
interrupt takes the lock while it is held here.
Of course, with the variant Joerg suggested, this isn't a
problem anymore.
> domain_flush_np_cache(domain, iova, page_size);
> --
> 2.7.4
>
Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Ralf Herbrich
Eingetragen am Amtsgericht Charlottenburg unter HRB 149173 B
Sitz: Berlin
Ust-ID: DE 289 237 879