The per-domain spinlock is acquired in iommu_flush_dev_iotlb(), which
may be called from interrupt context. For instance,
<IRQ>
iommu_flush_dev_iotlb
iommu_flush_iotlb_psi
intel_iommu_tlb_sync
iommu_iotlb_sync
__iommu_dma_unmap
? nvme_unmap_data
nvme_unmap_data
nvme_pci_complete_rq
nvme_irq
__handle_irq_event_percpu
handle_irq_event_percpu
handle_irq_event
handle_edge_irq
__common_interrupt
common_interrupt
This converts the spin_lock/unlock() calls into the irqsave/irqrestore
variants to avoid possible recursive locking.
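
As a minimal sketch of the pattern with a stand-in lock (not code from
this patch), the irqsave variant saves the caller's interrupt state and
disables local interrupts, so an interrupt arriving on the same CPU
cannot re-enter the critical section:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Safe from both process and interrupt context: local
	 * interrupts stay disabled while demo_lock is held.
	 */
	static void demo_critical_section(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* ... touch data also reachable from IRQ context ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
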
Fixes: ffd5869d93530 ("iommu/vt-d: Replace spin_lock_irqsave() with spin_lock()")
Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/iommu.c | 39 ++++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2d0d2ef820d2..8d4f6f0b6c1c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -495,8 +495,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -508,7 +509,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
@@ -1336,19 +1337,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!iommu->qi)
return NULL;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return info->ats_supported ? info : NULL;
}
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1357,8 +1359,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1366,7 +1369,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1458,14 +1461,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -2427,6 +2431,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
+ unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2438,9 +2443,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4064,6 +4069,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4075,9 +4081,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4396,19 +4402,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock(&dmar_domain->lock);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
--
2.25.1
On Wed, Aug 17, 2022 at 10:56:50AM +0800, Lu Baolu wrote:
>The per-domain spinlock is acquired in iommu_flush_dev_iotlb(), which
>may be called from interrupt context. For instance,
>
> <IRQ>
> iommu_flush_dev_iotlb
> iommu_flush_iotlb_psi
> intel_iommu_tlb_sync
> iommu_iotlb_sync
> __iommu_dma_unmap
> ? nvme_unmap_data
> nvme_unmap_data
> nvme_pci_complete_rq
> nvme_irq
> __handle_irq_event_percpu
> handle_irq_event_percpu
> handle_irq_event
> handle_edge_irq
> __common_interrupt
> common_interrupt
>
>This converts the spin_lock/unlock() calls into the irqsave/irqrestore
>variants to avoid possible recursive locking.
>
>Fixes: ffd5869d93530 ("iommu/vt-d: Replace spin_lock_irqsave() with spin_lock()")
>Signed-off-by: Lu Baolu <[email protected]>
drm-intel's CI system got completely blocked after the backmerge of
6.0-rc1 with this error:
<4> [15.072336] ================================
<4> [15.072337] WARNING: inconsistent lock state
<4> [15.072339] 6.0.0-rc1-CI_DRM_11990-g6590d43d39b9+ #1 Not tainted
<4> [15.072342] --------------------------------
<4> [15.072344] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
<4> [15.072346] swapper/6/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
<4> [15.072349] ffff88810440d678 (&domain->lock){+.?.}-{2:2}, at: iommu_flush_dev_iotlb.part.61+0x23/0x80
<4> [15.072356] {SOFTIRQ-ON-W} state was registered at:
<4> [15.072359] lock_acquire+0xd3/0x310
<4> [15.072361] _raw_spin_lock+0x2a/0x40
<4> [15.072364] domain_update_iommu_cap+0x20b/0x2c0
<4> [15.072366] intel_iommu_attach_device+0x5bd/0x860
<4> [15.072369] __iommu_attach_device+0x18/0xe0
<4> [15.072372] bus_iommu_probe+0x1f3/0x2d0
<4> [15.072374] bus_set_iommu+0x82/0xd0
<4> [15.072377] intel_iommu_init+0xe45/0x102a
<4> [15.072381] pci_iommu_init+0x9/0x31
<4> [15.072384] do_one_initcall+0x53/0x2f0
<4> [15.072387] kernel_init_freeable+0x18f/0x1e1
<4> [15.072389] kernel_init+0x11/0x120
<4> [15.072392] ret_from_fork+0x1f/0x30
<4> [15.072394] irq event stamp: 162354
<4> [15.072396] hardirqs last enabled at (162354): [<ffffffff81b59274>] _raw_spin_unlock_irqrestore+0x54/0x70
<4> [15.072400] hardirqs last disabled at (162353): [<ffffffff81b5901b>] _raw_spin_lock_irqsave+0x4b/0x50
<4> [15.072404] softirqs last enabled at (162338): [<ffffffff81e00323>] __do_softirq+0x323/0x48e
<4> [15.072408] softirqs last disabled at (162349): [<ffffffff810c1588>] irq_exit_rcu+0xb8/0xe0
<4> [15.072412]
other info that might help us debug this:
<4> [15.072414] Possible unsafe locking scenario:
<4> [15.072416] CPU0
<4> [15.072417] ----
<4> [15.072418] lock(&domain->lock);
<4> [15.072420] <Interrupt>
<4> [15.072422] lock(&domain->lock);
<4> [15.072423]
*** DEADLOCK ***
<4> [15.072426] 1 lock held by swapper/6/0:
<4> [15.072427] #0: ffffc900002b8ea8 ((&cookie->fq_timer)){+.-.}-{0:0}, at: call_timer_fn+0x0/0x2c0
<4> [15.072433]
After locally applying this patch the error is gone and machines are
back to life.
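
For anyone chasing this class of warning, a minimal hypothetical module
(demo names only, not from this patch) that provokes the same
{SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} report would look like:

	#include <linux/module.h>
	#include <linux/spinlock.h>
	#include <linux/timer.h>

	static DEFINE_SPINLOCK(demo_lock);
	static struct timer_list demo_timer;

	/* Timer callbacks run in softirq context, so this is the
	 * {IN-SOFTIRQ-W} use of demo_lock.
	 */
	static void demo_timer_fn(struct timer_list *t)
	{
		spin_lock(&demo_lock);
		spin_unlock(&demo_lock);
	}

	static int __init demo_init(void)
	{
		timer_setup(&demo_timer, demo_timer_fn, 0);
		mod_timer(&demo_timer, jiffies + HZ);

		/* Process context with softirqs enabled: the
		 * {SOFTIRQ-ON-W} use. If the timer fired here on
		 * this CPU, it would spin on a lock we already hold.
		 */
		spin_lock(&demo_lock);
		spin_unlock(&demo_lock);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

With CONFIG_PROVE_LOCKING=y, lockdep flags the inconsistency as soon as
both usages have been seen, without the actual race having to happen.
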
Acked-by: Lucas De Marchi <[email protected]>
thanks
Lucas De Marchi
On 2022/8/17 10:56, Lu Baolu wrote:
> The per-domain spinlock is acquired in iommu_flush_dev_iotlb(), which
> may be called from interrupt context. For instance,
> [...]
This patch has been queued:
https://lore.kernel.org/linux-iommu/[email protected]/
Best regards,
baolu