Hi folks,

This patchset fixes a VT-d hard lockup reported when an ATS capable endpoint
device is surprise unplugged. The device is connected to the system through a
PCIe switch, with the following topology:
 +-[0000:15]-+-00.0  Intel Corporation Ice Lake Memory Map/VT-d
 |           +-00.1  Intel Corporation Ice Lake Mesh 2 PCIe
 |           +-00.2  Intel Corporation Ice Lake RAS
 |           +-00.4  Intel Corporation Device 0b23
 |           \-01.0-[16-1b]----00.0-[17-1b]--+-00.0-[18]----00.0  NVIDIA Corporation Device 2324
 |                                           +-01.0-[19]----00.0  Mellanox Technologies MT2910 Family [ConnectX-7]
The user brought endpoint device 19:00.0's link down by flapping the link
control register of its hotplug capable slot 17:01.0. In response to the
resulting DLLSC interrupt, pciehp_ist() unloads the device driver and powers
the device off. While the driver is being unloaded, an IOMMU devTLB flush
request is issued to that link-down device, and the long completion/timeout
wait in interrupt context causes a continuous hard lockup warning and a
system hang.
[ 4211.433662] pcieport 0000:17:01.0: pciehp: Slot(108): Link Down
[ 4211.433664] pcieport 0000:17:01.0: pciehp: Slot(108): Card not present
[ 4223.822591] NMI watchdog: Watchdog detected hard LOCKUP on cpu 144
[ 4223.822622] CPU: 144 PID: 1422 Comm: irq/57-pciehp Kdump: loaded Tainted: G S
OE kernel version xxxx
[ 4223.822623] Hardware name: vendorname xxxx 666-106,
BIOS 01.01.02.03.01 05/15/2023
[ 4223.822623] RIP: 0010:qi_submit_sync+0x2c0/0x490
[ 4223.822624] Code: 48 be 00 00 00 00 00 08 00 00 49 85 74 24 20 0f 95 c1 48 8b
57 10 83 c1 04 83 3c 1a 03 0f 84 a2 01 00 00 49 8b 04 24 8b 70 34 <40> f6 c6 10
74 17 49 8b 04 24 8b 80 80 00 00 00 89 c2 d3 fa 41 39
[ 4223.822624] RSP: 0018:ffffc4f074f0bbb8 EFLAGS: 00000093
[ 4223.822625] RAX: ffffc4f040059000 RBX: 0000000000000014 RCX: 0000000000000005
[ 4223.822625] RDX: ffff9f3841315800 RSI: 0000000000000000 RDI: ffff9f38401a8340
[ 4223.822625] RBP: ffff9f38401a8340 R08: ffffc4f074f0bc00 R09: 0000000000000000
[ 4223.822626] R10: 0000000000000010 R11: 0000000000000018 R12: ffff9f384005e200
[ 4223.822626] R13: 0000000000000004 R14: 0000000000000046 R15: 0000000000000004
[ 4223.822626] FS: 0000000000000000(0000) GS:ffffa237ae400000(0000)
knlGS:0000000000000000
[ 4223.822627] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 4223.822627] CR2: 00007ffe86515d80 CR3: 000002fd3000a001 CR4: 0000000000770ee0
[ 4223.822627] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 4223.822628] DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
[ 4223.822628] PKRU: 55555554
[ 4223.822628] Call Trace:
[ 4223.822628] qi_flush_dev_iotlb+0xb1/0xd0
[ 4223.822628] __dmar_remove_one_dev_info+0x224/0x250
[ 4223.822629] dmar_remove_one_dev_info+0x3e/0x50
[ 4223.822629] intel_iommu_release_device+0x1f/0x30
[ 4223.822629] iommu_release_device+0x33/0x60
[ 4223.822629] iommu_bus_notifier+0x7f/0x90
[ 4223.822630] blocking_notifier_call_chain+0x60/0x90
[ 4223.822630] device_del+0x2e5/0x420
[ 4223.822630] pci_remove_bus_device+0x70/0x110
[ 4223.822630] pciehp_unconfigure_device+0x7c/0x130
[ 4223.822631] pciehp_disable_slot+0x6b/0x100
[ 4223.822631] pciehp_handle_presence_or_link_change+0xd8/0x320
[ 4223.822631] pciehp_ist+0x176/0x180
[ 4223.822631] ? irq_finalize_oneshot.part.50+0x110/0x110
[ 4223.822632] irq_thread_fn+0x19/0x50
[ 4223.822632] irq_thread+0x104/0x190
[ 4223.822632] ? irq_forced_thread_fn+0x90/0x90
[ 4223.822632] ? irq_thread_check_affinity+0xe0/0xe0
[ 4223.822633] kthread+0x114/0x130
[ 4223.822633] ? __kthread_cancel_work+0x40/0x40
[ 4223.822633] ret_from_fork+0x1f/0x30
[ 4223.822633] Kernel panic - not syncing: Hard LOCKUP
[ 4223.822634] CPU: 144 PID: 1422 Comm: irq/57-pciehp Kdump: loaded Tainted: G S
OE kernel version xxxx
[ 4223.822634] Hardware name: vendorname xxxx 666-106,
BIOS 01.01.02.03.01 05/15/2023
[ 4223.822634] Call Trace:
[ 4223.822634] <NMI>
[ 4223.822635] dump_stack+0x6d/0x88
[ 4223.822635] panic+0x101/0x2d0
[ 4223.822635] ? ret_from_fork+0x11/0x30
[ 4223.822635] nmi_panic.cold.14+0xc/0xc
[ 4223.822636] watchdog_overflow_callback.cold.8+0x6d/0x81
[ 4223.822636] __perf_event_overflow+0x4f/0xf0
[ 4223.822636] handle_pmi_common+0x1ef/0x290
[ 4223.822636] ? __set_pte_vaddr+0x28/0x40
[ 4223.822637] ? flush_tlb_one_kernel+0xa/0x20
[ 4223.822637] ? __native_set_fixmap+0x24/0x30
[ 4223.822637] ? ghes_copy_tofrom_phys+0x70/0x100
[ 4223.822637] ? __ghes_peek_estatus.isra.16+0x49/0xa0
[ 4223.822637] intel_pmu_handle_irq+0xba/0x2b0
[ 4223.822638] perf_event_nmi_handler+0x24/0x40
[ 4223.822638] nmi_handle+0x4d/0xf0
[ 4223.822638] default_do_nmi+0x49/0x100
[ 4223.822638] exc_nmi+0x134/0x180
[ 4223.822639] end_repeat_nmi+0x16/0x67
[ 4223.822639] RIP: 0010:qi_submit_sync+0x2c0/0x490
[ 4223.822639] Code: 48 be 00 00 00 00 00 08 00 00 49 85 74 24 20 0f 95 c1 48 8b
57 10 83 c1 04 83 3c 1a 03 0f 84 a2 01 00 00 49 8b 04 24 8b 70 34 <40> f6 c6 10
74 17 49 8b 04 24 8b 80 80 00 00 00 89 c2 d3 fa 41 39
[ 4223.822640] RSP: 0018:ffffc4f074f0bbb8 EFLAGS: 00000093
[ 4223.822640] RAX: ffffc4f040059000 RBX: 0000000000000014 RCX: 0000000000000005
[ 4223.822640] RDX: ffff9f3841315800 RSI: 0000000000000000 RDI: ffff9f38401a8340
[ 4223.822641] RBP: ffff9f38401a8340 R08: ffffc4f074f0bc00 R09: 0000000000000000
[ 4223.822641] R10: 0000000000000010 R11: 0000000000000018 R12: ffff9f384005e200
[ 4223.822641] R13: 0000000000000004 R14: 0000000000000046 R15: 0000000000000004
[ 4223.822641] ? qi_submit_sync+0x2c0/0x490
[ 4223.822642] ? qi_submit_sync+0x2c0/0x490
[ 4223.822642] </NMI>
[ 4223.822642] qi_flush_dev_iotlb+0xb1/0xd0
[ 4223.822642] __dmar_remove_one_dev_info+0x224/0x250
[ 4223.822643] dmar_remove_one_dev_info+0x3e/0x50
[ 4223.822643] intel_iommu_release_device+0x1f/0x30
[ 4223.822643] iommu_release_device+0x33/0x60
[ 4223.822643] iommu_bus_notifier+0x7f/0x90
[ 4223.822644] blocking_notifier_call_chain+0x60/0x90
[ 4223.822644] device_del+0x2e5/0x420
[ 4223.822644] pci_remove_bus_device+0x70/0x110
[ 4223.822644] pciehp_unconfigure_device+0x7c/0x130
[ 4223.822644] pciehp_disable_slot+0x6b/0x100
[ 4223.822645] pciehp_handle_presence_or_link_change+0xd8/0x320
[ 4223.822645] pciehp_ist+0x176/0x180
[ 4223.822645] ? irq_finalize_oneshot.part.50+0x110/0x110
[ 4223.822645] irq_thread_fn+0x19/0x50
[ 4223.822646] irq_thread+0x104/0x190
[ 4223.822646] ? irq_forced_thread_fn+0x90/0x90
[ 4223.822646] ? irq_thread_check_affinity+0xe0/0xe0
[ 4223.822646] kthread+0x114/0x130
[ 4223.822647] Kernel Offset: 0x6400000 from 0xffffffff81000000 (relocation
range: 0xffffffff80000000-0xffffffffbfffffff)
Make a quick fix by checking the device's error_state in
devtlb_invalidation_with_pasid() to avoid sending a meaningless devTLB flush
request to a link-down device that has already been marked
pci_channel_io_perm_failure and powered off in

  pciehp_ist()
    pciehp_handle_presence_or_link_change()
      pciehp_disable_slot()
        remove_board()
          pciehp_unconfigure_device()

Safe-removal unplug does not trigger this issue; this fix covers all
surprise-removal unplug operations.
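The check works because, on surprise removal, pciehp marks every device
behind the slot as permanently failed before the driver core tears it down.
Roughly (paraphrased from pciehp_unconfigure_device() in
drivers/pci/hotplug/pciehp_pci.c, shown only for illustration):

	/* Card is gone (surprise removal): flag all child devices first. */
	if (!presence)
		pci_walk_bus(parent, pci_dev_set_disconnected, NULL);
	/*
	 * Only afterwards does pci_stop_and_remove_bus_device() ->
	 * device_del() -> iommu_release_device() run, so by the time
	 * devtlb_invalidation_with_pasid() is reached,
	 * pci_dev_is_disconnected() already returns true.
	 */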
Patches 1 & 2 of this set were tested by [email protected] on stable 6.7-rc4.
Patch 3 has only been compile-tested on stable 6.7-rc4, not run-tested yet.
change log:
v5:
- add a patch that tries to fix the rare case (surprise removal of a device
  while a safe removal is in process).
v4:
- move the PCI device state checking after ATS per Baolu's suggestion.
v3:
- fix commit description typo.
v2:
- revise commit[1] description part according to Lukas' suggestion.
- revise commit[2] description to clarify the issue's impact.
v1:
- https://lore.kernel.org/lkml/[email protected]/T/
Thanks,
Ethan
Ethan Zhao (3):
PCI: make pci_dev_is_disconnected() helper public for other drivers
iommu/vt-d: don't issue devTLB flush request when device is
    disconnected
iommu/vt-d: abort the devTLB invalidation waiting if device is removed
drivers/iommu/intel/dmar.c | 3 ++-
drivers/iommu/intel/iommu.c | 37 +++++++++++++++++++++++++++++++++++++
drivers/iommu/intel/pasid.c | 3 +++
drivers/pci/pci.h | 5 -----
include/linux/pci.h | 5 +++++
5 files changed, 47 insertions(+), 6 deletions(-)
--
2.31.1
Move pci_dev_is_disconnected() from drivers/pci/pci.h to the public
include/linux/pci.h so that other drivers can use it.

No functional change.
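For illustration only, a hypothetical caller outside drivers/pci could then
use the helper like this (my_drv_reset() is made up and not part of this
series):

	#include <linux/pci.h>

	/* Skip hardware whose link is permanently gone, i.e. whose
	 * error_state is pci_channel_io_perm_failure. */
	static int my_drv_reset(struct pci_dev *pdev)
	{
		if (pci_dev_is_disconnected(pdev))
			return -ENODEV;

		return pci_reset_function(pdev);
	}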
Tested-by: Haorong Ye <[email protected]>
Signed-off-by: Ethan Zhao <[email protected]>
---
drivers/pci/pci.h | 5 -----
include/linux/pci.h | 5 +++++
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ecbcf041179..75fa2084492f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -366,11 +366,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
return 0;
}
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
-{
- return dev->error_state == pci_channel_io_perm_failure;
-}
-
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 60ca768bc867..869f2ec97a84 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2503,6 +2503,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
return NULL;
}
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ return dev->error_state == pci_channel_io_perm_failure;
+}
+
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
--
2.31.1
For endpoint devices connected to the system via hotplug capable ports, users
can request a warm reset of the device by flapping the device's link through
the slot's link control register. In response to the DLLSC interrupt,
pciehp_ist() unloads the device driver and then powers the device off, which
causes an IOMMU devTLB flush request to be sent to the device and a long
completion/timeout wait in interrupt context. That results in the following
continuous hard lockup warning and system hang:
[ 4211.433662] pcieport 0000:17:01.0: pciehp: Slot(108): Link Down
[ 4211.433664] pcieport 0000:17:01.0: pciehp: Slot(108): Card not present
[ 4223.822591] NMI watchdog: Watchdog detected hard LOCKUP on cpu 144
[ 4223.822622] CPU: 144 PID: 1422 Comm: irq/57-pciehp Kdump: loaded Tainted: G S
OE kernel version xxxx
[ 4223.822623] Hardware name: vendorname xxxx 666-106,
BIOS 01.01.02.03.01 05/15/2023
[ 4223.822623] RIP: 0010:qi_submit_sync+0x2c0/0x490
[ 4223.822624] Code: 48 be 00 00 00 00 00 08 00 00 49 85 74 24 20 0f 95 c1 48 8b
57 10 83 c1 04 83 3c 1a 03 0f 84 a2 01 00 00 49 8b 04 24 8b 70 34 <40> f6 c6 10
74 17 49 8b 04 24 8b 80 80 00 00 00 89 c2 d3 fa 41 39
[ 4223.822624] RSP: 0018:ffffc4f074f0bbb8 EFLAGS: 00000093
[ 4223.822625] RAX: ffffc4f040059000 RBX: 0000000000000014 RCX: 0000000000000005
[ 4223.822625] RDX: ffff9f3841315800 RSI: 0000000000000000 RDI: ffff9f38401a8340
[ 4223.822625] RBP: ffff9f38401a8340 R08: ffffc4f074f0bc00 R09: 0000000000000000
[ 4223.822626] R10: 0000000000000010 R11: 0000000000000018 R12: ffff9f384005e200
[ 4223.822626] R13: 0000000000000004 R14: 0000000000000046 R15: 0000000000000004
[ 4223.822626] FS: 0000000000000000(0000) GS:ffffa237ae400000(0000)
knlGS:0000000000000000
[ 4223.822627] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 4223.822627] CR2: 00007ffe86515d80 CR3: 000002fd3000a001 CR4: 0000000000770ee0
[ 4223.822627] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 4223.822628] DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
[ 4223.822628] PKRU: 55555554
[ 4223.822628] Call Trace:
[ 4223.822628] qi_flush_dev_iotlb+0xb1/0xd0
[ 4223.822628] __dmar_remove_one_dev_info+0x224/0x250
[ 4223.822629] dmar_remove_one_dev_info+0x3e/0x50
[ 4223.822629] intel_iommu_release_device+0x1f/0x30
[ 4223.822629] iommu_release_device+0x33/0x60
[ 4223.822629] iommu_bus_notifier+0x7f/0x90
[ 4223.822630] blocking_notifier_call_chain+0x60/0x90
[ 4223.822630] device_del+0x2e5/0x420
[ 4223.822630] pci_remove_bus_device+0x70/0x110
[ 4223.822630] pciehp_unconfigure_device+0x7c/0x130
[ 4223.822631] pciehp_disable_slot+0x6b/0x100
[ 4223.822631] pciehp_handle_presence_or_link_change+0xd8/0x320
[ 4223.822631] pciehp_ist+0x176/0x180
[ 4223.822631] ? irq_finalize_oneshot.part.50+0x110/0x110
[ 4223.822632] irq_thread_fn+0x19/0x50
[ 4223.822632] irq_thread+0x104/0x190
[ 4223.822632] ? irq_forced_thread_fn+0x90/0x90
[ 4223.822632] ? irq_thread_check_affinity+0xe0/0xe0
[ 4223.822633] kthread+0x114/0x130
[ 4223.822633] ? __kthread_cancel_work+0x40/0x40
[ 4223.822633] ret_from_fork+0x1f/0x30
[ 4223.822633] Kernel panic - not syncing: Hard LOCKUP
[ 4223.822634] CPU: 144 PID: 1422 Comm: irq/57-pciehp Kdump: loaded Tainted: G S
OE kernel version xxxx
[ 4223.822634] Hardware name: vendorname xxxx 666-106,
BIOS 01.01.02.03.01 05/15/2023
[ 4223.822634] Call Trace:
[ 4223.822634] <NMI>
[ 4223.822635] dump_stack+0x6d/0x88
[ 4223.822635] panic+0x101/0x2d0
[ 4223.822635] ? ret_from_fork+0x11/0x30
[ 4223.822635] nmi_panic.cold.14+0xc/0xc
[ 4223.822636] watchdog_overflow_callback.cold.8+0x6d/0x81
[ 4223.822636] __perf_event_overflow+0x4f/0xf0
[ 4223.822636] handle_pmi_common+0x1ef/0x290
[ 4223.822636] ? __set_pte_vaddr+0x28/0x40
[ 4223.822637] ? flush_tlb_one_kernel+0xa/0x20
[ 4223.822637] ? __native_set_fixmap+0x24/0x30
[ 4223.822637] ? ghes_copy_tofrom_phys+0x70/0x100
[ 4223.822637] ? __ghes_peek_estatus.isra.16+0x49/0xa0
[ 4223.822637] intel_pmu_handle_irq+0xba/0x2b0
[ 4223.822638] perf_event_nmi_handler+0x24/0x40
[ 4223.822638] nmi_handle+0x4d/0xf0
[ 4223.822638] default_do_nmi+0x49/0x100
[ 4223.822638] exc_nmi+0x134/0x180
[ 4223.822639] end_repeat_nmi+0x16/0x67
[ 4223.822639] RIP: 0010:qi_submit_sync+0x2c0/0x490
[ 4223.822639] Code: 48 be 00 00 00 00 00 08 00 00 49 85 74 24 20 0f 95 c1 48 8b
57 10 83 c1 04 83 3c 1a 03 0f 84 a2 01 00 00 49 8b 04 24 8b 70 34 <40> f6 c6 10
74 17 49 8b 04 24 8b 80 80 00 00 00 89 c2 d3 fa 41 39
[ 4223.822640] RSP: 0018:ffffc4f074f0bbb8 EFLAGS: 00000093
[ 4223.822640] RAX: ffffc4f040059000 RBX: 0000000000000014 RCX: 0000000000000005
[ 4223.822640] RDX: ffff9f3841315800 RSI: 0000000000000000 RDI: ffff9f38401a8340
[ 4223.822641] RBP: ffff9f38401a8340 R08: ffffc4f074f0bc00 R09: 0000000000000000
[ 4223.822641] R10: 0000000000000010 R11: 0000000000000018 R12: ffff9f384005e200
[ 4223.822641] R13: 0000000000000004 R14: 0000000000000046 R15: 0000000000000004
[ 4223.822641] ? qi_submit_sync+0x2c0/0x490
[ 4223.822642] ? qi_submit_sync+0x2c0/0x490
[ 4223.822642] </NMI>
[ 4223.822642] qi_flush_dev_iotlb+0xb1/0xd0
[ 4223.822642] __dmar_remove_one_dev_info+0x224/0x250
[ 4223.822643] dmar_remove_one_dev_info+0x3e/0x50
[ 4223.822643] intel_iommu_release_device+0x1f/0x30
[ 4223.822643] iommu_release_device+0x33/0x60
[ 4223.822643] iommu_bus_notifier+0x7f/0x90
[ 4223.822644] blocking_notifier_call_chain+0x60/0x90
[ 4223.822644] device_del+0x2e5/0x420
[ 4223.822644] pci_remove_bus_device+0x70/0x110
[ 4223.822644] pciehp_unconfigure_device+0x7c/0x130
[ 4223.822644] pciehp_disable_slot+0x6b/0x100
[ 4223.822645] pciehp_handle_presence_or_link_change+0xd8/0x320
[ 4223.822645] pciehp_ist+0x176/0x180
[ 4223.822645] ? irq_finalize_oneshot.part.50+0x110/0x110
[ 4223.822645] irq_thread_fn+0x19/0x50
[ 4223.822646] irq_thread+0x104/0x190
[ 4223.822646] ? irq_forced_thread_fn+0x90/0x90
[ 4223.822646] ? irq_thread_check_affinity+0xe0/0xe0
[ 4223.822646] kthread+0x114/0x130
[ 4223.822647] ? __kthread_cancel_work+0x40/0x40
[ 4223.822647] ret_from_fork+0x1f/0x30
[ 4223.822647] Kernel Offset: 0x6400000 from 0xffffffff81000000 (relocation
range: 0xffffffff80000000-0xffffffffbfffffff)
Fix it by checking the device's error_state in
devtlb_invalidation_with_pasid() to avoid sending a meaningless devTLB flush
request to a link-down device that has already been marked
pci_channel_io_perm_failure and powered off in

  pciehp_ist()
    pciehp_handle_presence_or_link_change()
      pciehp_disable_slot()
        remove_board()
          pciehp_unconfigure_device()

For safe-removal unplug, the link is still alive when the IOMMU releases the
device and issues the devTLB invalidation request, so that path does not
trigger this issue. This fix covers all surprise-removal unplug operations.
Tested-by: Haorong Ye <[email protected]>
Signed-off-by: Ethan Zhao <[email protected]>
---
drivers/iommu/intel/pasid.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 74e8e4c17e81..7dbee9931eb6 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -481,6 +481,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
if (!info || !info->ats_enabled)
return;
+ if (pci_dev_is_disconnected(to_pci_dev(dev)))
+ return;
+
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
pfsid = info->pfsid;
--
2.31.1
Even when a devTLB invalidation request has just been submitted and
qi_submit_sync() is waiting for it to complete or time out, it is possible
that the device is removed or powered off. Try to break out of the wait in
this rare but possible case.

This patch is sent for comments. It is not tested yet, it has only passed
compilation.
Signed-off-by: Ethan Zhao <[email protected]>
---
drivers/iommu/intel/dmar.c | 3 ++-
drivers/iommu/intel/iommu.c | 36 ++++++++++++++++++++++++++++++++++++
2 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62a9a..d8637ab93387 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1422,7 +1422,8 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
- while (qi->desc_status[wait_index] != QI_DONE) {
+ while (qi->desc_status[wait_index] != QI_DONE &&
+ qi->desc_status[wait_index] != QI_ABORT) {
/*
* We will leave the interrupts disabled, to prevent interrupt
* context to queue another cmd while a cmd is already submitted
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 897159dba47d..33075d0688bc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4472,10 +4472,46 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
+static void intel_iommu_abort_devtlib_invalidate(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct q_inval *qi = iommu->qi;
+ struct qi_desc *desc, *idesc;
+ int index, offset, shift;
+ u16 sid, qdep, pfsid;
+ unsigned long flags;
+
+ if (!dev_is_pci(info->dev) || !info->ats_enabled || !qi)
+ return;
+ if (!pci_dev_is_disconnected(to_pci_dev(dev)))
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ pfsid = info->pfsid;
+
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ for (index = 1; index < QI_LENGTH; index++) {
+ offset = index << shift;
+ desc = qi->desc + offset;
+ if (desc->qw0 & QI_IWD_TYPE) {
+ offset = (index-1) << shift;
+ idesc = qi->desc + offset;
+ if (idesc->qw0 & QI_DEV_EIOTLB_SID(sid)) {
+ if (qi->desc_status[index] == QI_IN_USE)
+ qi->desc_status[index] = QI_ABORT;
+ }
+ }
+ }
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+
+}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ intel_iommu_abort_devtlib_invalidate(dev);
dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
--
2.31.1
On 12/22/2023 6:41 PM, Ethan Zhao wrote:
> Even when a devTLB invalidation request has just been submitted and
> qi_submit_sync() is waiting for it to complete or time out, it is possible
> that the device is removed or powered off. Try to break out of the wait in
> this rare but possible case.
>
> This patch is sent for comments. It is not tested yet, it has only passed
> compilation.
>
> Signed-off-by: Ethan Zhao <[email protected]>
> ---
> drivers/iommu/intel/dmar.c | 3 ++-
> drivers/iommu/intel/iommu.c | 36 ++++++++++++++++++++++++++++++++++++
> 2 files changed, 38 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
> index 23cb80d62a9a..d8637ab93387 100644
> --- a/drivers/iommu/intel/dmar.c
> +++ b/drivers/iommu/intel/dmar.c
> @@ -1422,7 +1422,8 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
> */
> writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
>
> - while (qi->desc_status[wait_index] != QI_DONE) {
> + while (qi->desc_status[wait_index] != QI_DONE &&
> + qi->desc_status[wait_index] != QI_ABORT) {
Another way would be to check pci_device_is_present() here and bail out;
how about that? (A rough sketch of this idea is appended at the end of this
mail.)
> /*
> * We will leave the interrupts disabled, to prevent interrupt
> * context to queue another cmd while a cmd is already submitted
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index 897159dba47d..33075d0688bc 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -4472,10 +4472,46 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
> return &iommu->iommu;
> }
>
> +static void intel_iommu_abort_devtlib_invalidate(struct device *dev)
> +{
> + struct device_domain_info *info = dev_iommu_priv_get(dev);
> + struct intel_iommu *iommu = info->iommu;
> + struct q_inval *qi = iommu->qi;
> + struct qi_desc *desc, *idesc;
> + int index, offset, shift;
> + u16 sid, qdep, pfsid;
> + unsigned long flags;
> +
> + if (!dev_is_pci(info->dev) || !info->ats_enabled || !qi)
> + return;
> + if (!pci_dev_is_disconnected(to_pci_dev(dev)))
> + return;
> +
> + sid = info->bus << 8 | info->devfn;
> + qdep = info->ats_qdep;
> + pfsid = info->pfsid;
> +
> + raw_spin_lock_irqsave(&qi->q_lock, flags);
> + for (index = 1; index < QI_LENGTH; index++) {
> + offset = index << shift;
> + desc = qi->desc + offset;
> + if (desc->qw0 & QI_IWD_TYPE) {
> + offset = (index-1) << shift;
> + idesc = qi->desc + offset;
> + if (idesc->qw0 & QI_DEV_EIOTLB_SID(sid)) {
> + if (qi->desc_status[index] == QI_IN_USE)
> + qi->desc_status[index] = QI_ABORT;
> + }
> + }
> + }
> + raw_spin_unlock_irqrestore(&qi->q_lock, flags);
> +
> +}
> static void intel_iommu_release_device(struct device *dev)
> {
> struct device_domain_info *info = dev_iommu_priv_get(dev);
>
> + intel_iommu_abort_devtlib_invalidate(dev);
I wonder whether there is a lock that prevents the pciehp_ist()
surprise-removal interrupt response from re-entering this path while another
safe removal is in process.

If so, see above.
> dmar_remove_one_dev_info(dev);
> intel_pasid_free_table(dev);
> intel_iommu_debugfs_remove_dev(info);
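To make the pci_device_is_present() idea above concrete, a rough sketch
follows. Assumptions: the target pci_dev would have to be plumbed into
qi_submit_sync(), which is not the case today, and pci_device_is_present()
performs a config read, which has its own cost inside this interrupts-off
loop:

	while (qi->desc_status[wait_index] != QI_DONE) {
		rc = qi_check_fault(iommu, index, wait_index);
		if (rc)
			break;

		/* Hypothetical: bail out once the target device is gone. */
		if (pdev && !pci_device_is_present(pdev)) {
			rc = -ENODEV;
			break;
		}

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}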
On 12/23/2023 7:35 AM, Ethan Zhao wrote:
>
> On 12/22/2023 6:41 PM, Ethan Zhao wrote:
>> Even when a devTLB invalidation request has just been submitted and
>> qi_submit_sync() is waiting for it to complete or time out, it is possible
>> that the device is removed or powered off. Try to break out of the wait in
>> this rare but possible case.
>>
>> This patch is sent for comments. It is not tested yet, it has only passed
>> compilation.
>>
>> Signed-off-by: Ethan Zhao <[email protected]>
>> ---
>> drivers/iommu/intel/dmar.c | 3 ++-
>> drivers/iommu/intel/iommu.c | 36 ++++++++++++++++++++++++++++++++++++
>> 2 files changed, 38 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
>> index 23cb80d62a9a..d8637ab93387 100644
>> --- a/drivers/iommu/intel/dmar.c
>> +++ b/drivers/iommu/intel/dmar.c
>> @@ -1422,7 +1422,8 @@ int qi_submit_sync(struct intel_iommu *iommu,
>> struct qi_desc *desc,
>> */
>> writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
>> - while (qi->desc_status[wait_index] != QI_DONE) {
>> + while (qi->desc_status[wait_index] != QI_DONE &&
>> + qi->desc_status[wait_index] != QI_ABORT) {
>
> Another way would be to check pci_device_is_present() here and bail out;
>
> how about that?
>
>> /*
>> * We will leave the interrupts disabled, to prevent interrupt
>> * context to queue another cmd while a cmd is already
>> submitted
>> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
>> index 897159dba47d..33075d0688bc 100644
>> --- a/drivers/iommu/intel/iommu.c
>> +++ b/drivers/iommu/intel/iommu.c
>> @@ -4472,10 +4472,46 @@ static struct iommu_device
>> *intel_iommu_probe_device(struct device *dev)
>> return &iommu->iommu;
>> }
>> +static void intel_iommu_abort_devtlib_invalidate(struct device *dev)
>> +{
>> + struct device_domain_info *info = dev_iommu_priv_get(dev);
>> + struct intel_iommu *iommu = info->iommu;
>> + struct q_inval *qi = iommu->qi;
>> + struct qi_desc *desc, *idesc;
>> + int index, offset, shift;
>> + u16 sid, qdep, pfsid;
>> + unsigned long flags;
>> +
>> + if (!dev_is_pci(info->dev) || !info->ats_enabled || !qi)
>> + return;
>> + if (!pci_dev_is_disconnected(to_pci_dev(dev)))
>> + return;
>> +
>> + sid = info->bus << 8 | info->devfn;
>> + qdep = info->ats_qdep;
>> + pfsid = info->pfsid;
>> +
>> + raw_spin_lock_irqsave(&qi->q_lock, flags);
>> + for (index = 1; index < QI_LENGTH; index++) {
>> + offset = index << shift;
>> + desc = qi->desc + offset;
>> + if (desc->qw0 & QI_IWD_TYPE) {
>> + offset = (index-1) << shift;
>> + idesc = qi->desc + offset;
>> + if (idesc->qw0 & QI_DEV_EIOTLB_SID(sid)) {
>> + if (qi->desc_status[index] == QI_IN_USE)
>> + qi->desc_status[index] = QI_ABORT;
>> + }
>> + }
>> + }
>> + raw_spin_unlock_irqrestore(&qi->q_lock, flags);
>> +
>> +}
>> static void intel_iommu_release_device(struct device *dev)
>> {
>> struct device_domain_info *info = dev_iommu_priv_get(dev);
>> + intel_iommu_abort_devtlib_invalidate(dev);
>
> I wonder whether there is a lock that prevents the pciehp_ist()
> surprise-removal
pci_lock_rescan_remove() will block another thread from re-entering
pci_stop_and_remove_bus_device() to get here, so this patch doesn't work for
that case (see the sketch at the end of this mail).
>
> interrupt response from re-entering this path while another safe removal
> is in process.
>
> If so, see above.
>
>> dmar_remove_one_dev_info(dev);
>> intel_pasid_free_table(dev);
>> intel_iommu_debugfs_remove_dev(info);
>
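For reference, the serialization relied on above, roughly (paraphrased from
pci_stop_and_remove_bus_device_locked() in drivers/pci/remove.c; pciehp's
removal path takes the same lock around pci_stop_and_remove_bus_device()):

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);	/* -> device_del() -> iommu_release_device() */
	pci_unlock_rescan_remove();

so two removal flows for the same device cannot run this path concurrently.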