Previously a race existed between __free_irq() and __setup_irq() wherein
the thread_mask of a just removed action could be handed out to a newly
added action and the freed irq thread would then tread on the oneshot
mask bit of the newly added irq thread in irq_finalize_oneshot():
time
| __free_irq()
| raw_spin_lock_irqsave(&desc->lock, flags);
| <remove action from linked list>
| raw_spin_unlock_irqrestore(&desc->lock, flags);
|
| __setup_irq()
| raw_spin_lock_irqsave(&desc->lock, flags);
| <traverse linked list to determine oneshot mask bit>
| raw_spin_unlock_irqrestore(&desc->lock, flags);
|
| irq_thread() of freed irq (__free_irq() waits in synchronize_irq())
| irq_thread_fn()
| irq_finalize_oneshot()
| raw_spin_lock_irq(&desc->lock);
| desc->threads_oneshot &= ~action->thread_mask;
| raw_spin_unlock_irq(&desc->lock);
v
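For illustration, a simplified sketch of the thread_mask allocation in
__setup_irq() (paraphrased, not the verbatim kernel code): the new action gets
the first bit which is not claimed by any action still on the list, so an
action which was just unlinked by __free_irq() no longer protects its bit even
though its thread may still clear that bit in irq_finalize_oneshot() later on:

	unsigned long thread_mask = 0;
	struct irqaction *old;

	/* collect the oneshot bits of all actions still linked to the desc */
	for (old = desc->action; old; old = old->next)
		thread_mask |= old->thread_mask;

	/* first free bit -- possibly the bit of the action just removed */
	if (new->flags & IRQF_ONESHOT)
		new->thread_mask = 1UL << ffz(thread_mask);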
The race was known at least since 2012 when it was documented in a code
comment by commit e04268b0effc ("genirq: Remove paranoid warnons and
bogus fixups").
But it wasn't until 2017 that it was fixed by commit 9114014cf4e6
("genirq: Add mutex to irq desc to serialize request/free_irq()"),
apparently inadvertently so because the race is neither mentioned in the
commit message nor was the code comment updated. Make up for that.
Signed-off-by: Lukas Wunner <[email protected]>
---
kernel/irq/manage.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 591cfe901162..123a227d3357 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1025,10 +1025,7 @@ static int irq_thread(void *data)
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
* synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
- * oneshot mask bit can be set. We cannot verify that as we
- * cannot touch the oneshot mask at this point anymore as
- * __setup_irq() might have given out currents thread_mask
- * again.
+ * oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
return 0;
@@ -1245,7 +1242,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
* for synchronize_irq() to complete without holding the optional
- * chip bus lock and desc->lock.
+ * chip bus lock and desc->lock. Also protects against handing out
+ * a recycled oneshot thread_mask bit while it's still in use by
+ * its previous owner.
*/
mutex_lock(&desc->request_mutex);
--
2.17.1
When pciehp is converted to threaded IRQ handling, removal of unplugged
devices below a PCIe hotplug port happens synchronously in the IRQ
thread. Removal of devices typically entails a call to free_irq() by
their drivers.
If those devices share their IRQ with the hotplug port, __free_irq()
deadlocks because it calls synchronize_irq() to wait for all hard IRQ
handlers as well as all threads sharing the IRQ to finish.
Actually it's sufficient to wait only for the IRQ thread of the removed
device, so call synchronize_hardirq() to wait for all hard IRQ handlers
to finish, but no longer for any threads. Compensate by rearranging the
control flow in irq_wait_for_interrupt() such that the device's thread
is allowed to run one last time after kthread_stop() has been called.
kthread_stop() blocks until the IRQ thread has completed. On completion
the IRQ thread clears its oneshot thread_mask bit. This is safe because
__free_irq() holds the request_mutex, thereby preventing __setup_irq()
from handing out the same oneshot thread_mask bit to a newly requested
action.
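To illustrate with a hypothetical driver (the "foo" names are made up, not
taken from the tree): if foo's device sits below the hotplug port and shares
the port's IRQ, its remove path runs inside pciehp's IRQ thread on surprise
removal:

	static void foo_remove(struct pci_dev *pdev)
	{
		struct foo_priv *priv = pci_get_drvdata(pdev);

		/*
		 * Executed in pciehp's IRQ thread during removal. With
		 * synchronize_irq() in __free_irq() this waits for all
		 * threads sharing the IRQ -- including the thread we are
		 * running in -- and never returns. With synchronize_hardirq()
		 * plus kthread_stop() only foo's own thread is waited for.
		 */
		free_irq(pdev->irq, priv);
	}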
Stack trace for posterity:
INFO: task irq/17-pciehp:94 blocked for more than 120 seconds.
schedule+0x28/0x80
synchronize_irq+0x6e/0xa0
__free_irq+0x15a/0x2b0
free_irq+0x33/0x70
pciehp_release_ctrl+0x98/0xb0
pcie_port_remove_service+0x2f/0x40
device_release_driver_internal+0x157/0x220
bus_remove_device+0xe2/0x150
device_del+0x124/0x340
device_unregister+0x16/0x60
remove_iter+0x1a/0x20
device_for_each_child+0x4b/0x90
pcie_port_device_remove+0x1e/0x30
pci_device_remove+0x36/0xb0
device_release_driver_internal+0x157/0x220
pci_stop_bus_device+0x7d/0xa0
pci_stop_bus_device+0x3d/0xa0
pci_stop_and_remove_bus_device+0xe/0x20
pciehp_unconfigure_device+0xb8/0x160
pciehp_disable_slot+0x84/0x130
pciehp_ist+0x158/0x190
irq_thread_fn+0x1b/0x50
irq_thread+0x143/0x1a0
kthread+0x111/0x130
Cc: Bjorn Helgaas <[email protected]>
Cc: Mika Westerberg <[email protected]>
Signed-off-by: Lukas Wunner <[email protected]>
---
Changes v1 -> v2:
* Add code comment explaining the significance of holding the
request_mutex in __free_irq() until after kthread_stop(),
add explanation to commit message as well. (Thomas Gleixner)
* Update several code comments to refer to synchronize_hardirq()
or kthread_stop() instead of synchronize_irq().
kernel/irq/manage.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 123a227d3357..9390f1595c50 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
+ if (kthread_should_stop()) {
+ /* may need to run one last time */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ __set_current_state(TASK_RUNNING);
+ return -1;
+ }
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
- return -1;
}
/*
@@ -1024,7 +1031,7 @@ static int irq_thread(void *data)
/*
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
- * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+ * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
* oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
@@ -1241,7 +1248,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
- * for synchronize_irq() to complete without holding the optional
+ * for synchronize_hardirq() to complete without holding the optional
* chip bus lock and desc->lock. Also protects against handing out
* a recycled oneshot thread_mask bit while it's still in use by
* its previous owner.
@@ -1612,11 +1619,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
/*
* Drop bus_lock here so the changes which were done in the chip
* callbacks above are synced out to the irq chips which hang
- * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+ * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
*
* Aside of that the bus_lock can also be taken from the threaded
* handler in irq_finalize_oneshot() which results in a deadlock
- * because synchronize_irq() would wait forever for the thread to
+ * because kthread_stop() would wait forever for the thread to
* complete, which is blocked on the bus lock.
*
* The still held desc->request_mutex() protects against a
@@ -1628,7 +1635,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU: */
- synchronize_irq(irq);
+ synchronize_hardirq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
/*
@@ -1646,6 +1653,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
}
#endif
+ /*
+ * The action has already been removed above, but the thread writes
+ * its oneshot mask bit when it completes. Hold the request_mutex
+ * to prevent __setup_irq() from handing out the same bit to a
+ * newly requested action.
+ */
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
--
2.17.1
Lukas,
On Sun, 24 Jun 2018, Lukas Wunner wrote:
> Previously a race existed between __free_irq() and __setup_irq() wherein
> the thread_mask of a just removed action could be handed out to a newly
> added action and the freed irq thread would then tread on the oneshot
> mask bit of the newly added irq thread in irq_finalize_oneshot():
>
> time
> | __free_irq()
> | raw_spin_lock_irqsave(&desc->lock, flags);
> | <remove action from linked list>
> | raw_spin_unlock_irqrestore(&desc->lock, flags);
> |
> | __setup_irq()
> | raw_spin_lock_irqsave(&desc->lock, flags);
> | <traverse linked list to determine oneshot mask bit>
> | raw_spin_unlock_irqrestore(&desc->lock, flags);
> |
> | irq_thread() of freed irq (__free_irq() waits in synchronize_irq())
> | irq_thread_fn()
> | irq_finalize_oneshot()
> | raw_spin_lock_irq(&desc->lock);
> | desc->threads_oneshot &= ~action->thread_mask;
> | raw_spin_unlock_irq(&desc->lock);
> v
>
> The race was known at least since 2012 when it was documented in a code
> comment by commit e04268b0effc ("genirq: Remove paranoid warnons and
> bogus fixups").
The race was known, but it was also harmless as nothing would touch stuff
after synchronize_irq().
> But it wasn't until 2017 that it was fixed by commit 9114014cf4e6
> ("genirq: Add mutex to irq desc to serialize request/free_irq()"),
> apparently inadvertently so because the race is neither mentioned in the
> commit message nor was the code comment updated. Make up for that.
Thanks for following up. This update is very well done.
tglx
On Sun, 24 Jun 2018, Lukas Wunner wrote:
> When pciehp is converted to threaded IRQ handling, removal of unplugged
> devices below a PCIe hotplug port happens synchronously in the IRQ
> thread. Removal of devices typically entails a call to free_irq() by
> their drivers.
Is this an actual mainline problem or did you discover that in the course of
upcoming work?
Thanks,
tglx
On Sun, Jun 24, 2018 at 11:49:10AM +0200, Thomas Gleixner wrote:
> On Sun, 24 Jun 2018, Lukas Wunner wrote:
> > When pciehp is converted to threaded IRQ handling, removal of unplugged
> > devices below a PCIe hotplug port happens synchronously in the IRQ
> > thread. Removal of devices typically entails a call to free_irq() by
> > their drivers.
>
> Is this an actual mainline problem or did you discover that in the course of
> upcoming work?
It's needed for upcoming work, specifically the pciehp event handling
rework which will hopefully appear in 4.19, so nothing urgent.
Doesn't hurt at all to let this bake in linux-next for a few weeks.
There is something else, you introduced irq_wake_thread() four years ago
with a92444c6b222 for sdhci/sdio, but for some reason it was never used.
Before you or anyone else deletes it for lack of callers, please be aware
that I'm making heavy use of it now in pciehp. I forgot to cc you on the
relevant patch [17/32], but I'll bounce it to you in a minute in case
you want to take a look at it.
Of course this raises the question of how irq_wake_thread() is serialized
against __free_irq(), but it seems that's safe because irq_wake_thread()
searches the action list while holding desc->lock: If it grabs the lock
before __free_irq(), it'll just wake the thread normally. If it grabs
the lock after __free_irq(), the action will be gone from the list,
so irq_wake_thread() essentially becomes a no-op.
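For reference, a sketch of irq_wake_thread() as introduced by a92444c6b222
(paraphrased from memory, error checks omitted, not verbatim): both the action
lookup and the wakeup happen under desc->lock, which is why the reasoning
above holds:

	void irq_wake_thread(unsigned int irq, void *dev_id)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		struct irqaction *action;
		unsigned long flags;

		if (!desc)
			return;

		raw_spin_lock_irqsave(&desc->lock, flags);
		for (action = desc->action; action; action = action->next) {
			if (action->dev_id == dev_id) {
				if (action->thread)
					__irq_wake_thread(desc, action);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}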
Thanks,
Lukas
On Sun, 24 Jun 2018, Lukas Wunner wrote:
> On Sun, Jun 24, 2018 at 11:49:10AM +0200, Thomas Gleixner wrote:
> > On Sun, 24 Jun 2018, Lukas Wunner wrote:
> > > When pciehp is converted to threaded IRQ handling, removal of unplugged
> > > devices below a PCIe hotplug port happens synchronously in the IRQ
> > > thread. Removal of devices typically entails a call to free_irq() by
> > > their drivers.
> >
> > Is this an actual mainline problem or did you discover that in the course of
> > upcoming work?
>
> It's needed for upcoming work, specifically the pciehp event handling
> rework which will hopefully appear in 4.19, so nothing urgent.
> Doesn't hurt at all to let this bake in linux-next for a few weeks.
Good.
> There is something else, you introduced irq_wake_thread() four years ago
> with a92444c6b222 for sdhci/sdio, but for some reason it was never used.
> Before you or anyone else deletes it for lack of callers, please be aware
> that I'm making heavy use of it now in pciehp.
Ha! I was looking at it yesterday for unrelated reasons and was wondering
about the huge amount of users ....
> I forgot to cc you on the relevant patch [17/32], but I'll bounce it to
> you in a minute in case you want to take a look at it.
Sure
> Of course this raises the question of how irq_wake_thread() is serialized
> against __free_irq(), but it seems that's safe because irq_wake_thread()
> searches the action list while holding desc->lock: If it grabs the lock
> before __free_irq(), it'll just wake the thread normally. If it grabs
> the lock after __free_irq(), the action will be gone from the list,
> so irq_wake_thread() essentially becomes a no-op.
Exactly.
Thanks,
tglx
Commit-ID: 836557bd58e5e65c05c73af9f6ebed885dbfccfc
Gitweb: https://git.kernel.org/tip/836557bd58e5e65c05c73af9f6ebed885dbfccfc
Author: Lukas Wunner <[email protected]>
AuthorDate: Sun, 24 Jun 2018 10:35:18 +0200
Committer: Thomas Gleixner <[email protected]>
CommitDate: Sun, 24 Jun 2018 14:17:26 +0200
genirq: Update code comments wrt recycled thread_mask
Previously a race existed between __free_irq() and __setup_irq() wherein
the thread_mask of a just removed action could be handed out to a newly
added action and the freed irq thread would then tread on the oneshot
mask bit of the newly added irq thread in irq_finalize_oneshot():
time
| __free_irq()
| raw_spin_lock_irqsave(&desc->lock, flags);
| <remove action from linked list>
| raw_spin_unlock_irqrestore(&desc->lock, flags);
|
| __setup_irq()
| raw_spin_lock_irqsave(&desc->lock, flags);
| <traverse linked list to determine oneshot mask bit>
| raw_spin_unlock_irqrestore(&desc->lock, flags);
|
| irq_thread() of freed irq (__free_irq() waits in synchronize_irq())
| irq_thread_fn()
| irq_finalize_oneshot()
| raw_spin_lock_irq(&desc->lock);
| desc->threads_oneshot &= ~action->thread_mask;
| raw_spin_unlock_irq(&desc->lock);
v
The race was known at least since 2012 when it was documented in a code
comment by commit e04268b0effc ("genirq: Remove paranoid warnons and bogus
fixups"). The race itself is harmless as nothing touches any of the
potentially freed data after synchronize_irq().
In 2017 the race was closed by commit 9114014cf4e6 ("genirq: Add mutex to
irq desc to serialize request/free_irq()"), apparently inadvertently so
because the race is neither mentioned in the commit message nor was the
code comment updated. Make up for that.
Signed-off-by: Lukas Wunner <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Bjorn Helgaas <[email protected]>
Cc: Mika Westerberg <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/32fc25aa35ecef4b2692f57687bb7fc2a57230e2.1529828292.git.lukas@wunner.de
---
kernel/irq/manage.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 591cfe901162..123a227d3357 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1025,10 +1025,7 @@ static int irq_thread(void *data)
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
* synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
- * oneshot mask bit can be set. We cannot verify that as we
- * cannot touch the oneshot mask at this point anymore as
- * __setup_irq() might have given out currents thread_mask
- * again.
+ * oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
return 0;
@@ -1245,7 +1242,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
* for synchronize_irq() to complete without holding the optional
- * chip bus lock and desc->lock.
+ * chip bus lock and desc->lock. Also protects against handing out
+ * a recycled oneshot thread_mask bit while it's still in use by
+ * its previous owner.
*/
mutex_lock(&desc->request_mutex);
Commit-ID: 519cc8652b3a1d3d0a02257afbe9573ad644da26
Gitweb: https://git.kernel.org/tip/519cc8652b3a1d3d0a02257afbe9573ad644da26
Author: Lukas Wunner <[email protected]>
AuthorDate: Sun, 24 Jun 2018 10:35:30 +0200
Committer: Thomas Gleixner <[email protected]>
CommitDate: Sun, 24 Jun 2018 14:17:27 +0200
genirq: Synchronize only with single thread on free_irq()
When pciehp is converted to threaded IRQ handling, removal of unplugged
devices below a PCIe hotplug port happens synchronously in the IRQ thread.
Removal of devices typically entails a call to free_irq() by their drivers.
If those devices share their IRQ with the hotplug port, __free_irq()
deadlocks because it calls synchronize_irq() to wait for all hard IRQ
handlers as well as all threads sharing the IRQ to finish.
Actually it's sufficient to wait only for the IRQ thread of the removed
device, so call synchronize_hardirq() to wait for all hard IRQ handlers to
finish, but no longer for any threads. Compensate by rearranging the
control flow in irq_wait_for_interrupt() such that the device's thread is
allowed to run one last time after kthread_stop() has been called.
kthread_stop() blocks until the IRQ thread has completed. On completion
the IRQ thread clears its oneshot thread_mask bit. This is safe because
__free_irq() holds the request_mutex, thereby preventing __setup_irq() from
handing out the same oneshot thread_mask bit to a newly requested action.
Stack trace for posterity:
INFO: task irq/17-pciehp:94 blocked for more than 120 seconds.
schedule+0x28/0x80
synchronize_irq+0x6e/0xa0
__free_irq+0x15a/0x2b0
free_irq+0x33/0x70
pciehp_release_ctrl+0x98/0xb0
pcie_port_remove_service+0x2f/0x40
device_release_driver_internal+0x157/0x220
bus_remove_device+0xe2/0x150
device_del+0x124/0x340
device_unregister+0x16/0x60
remove_iter+0x1a/0x20
device_for_each_child+0x4b/0x90
pcie_port_device_remove+0x1e/0x30
pci_device_remove+0x36/0xb0
device_release_driver_internal+0x157/0x220
pci_stop_bus_device+0x7d/0xa0
pci_stop_bus_device+0x3d/0xa0
pci_stop_and_remove_bus_device+0xe/0x20
pciehp_unconfigure_device+0xb8/0x160
pciehp_disable_slot+0x84/0x130
pciehp_ist+0x158/0x190
irq_thread_fn+0x1b/0x50
irq_thread+0x143/0x1a0
kthread+0x111/0x130
Signed-off-by: Lukas Wunner <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Bjorn Helgaas <[email protected]>
Cc: Mika Westerberg <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/d72b41309f077c8d3bee6cc08ad3662d50b5d22a.1529828292.git.lukas@wunner.de
---
kernel/irq/manage.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 123a227d3357..1f8be33572a7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
+ if (kthread_should_stop()) {
+ /* may need to run one last time */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ __set_current_state(TASK_RUNNING);
+ return -1;
+ }
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
- return -1;
}
/*
@@ -1024,7 +1031,7 @@ static int irq_thread(void *data)
/*
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
- * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+ * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
* oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
@@ -1241,7 +1248,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
- * for synchronize_irq() to complete without holding the optional
+ * for synchronize_hardirq() to complete without holding the optional
* chip bus lock and desc->lock. Also protects against handing out
* a recycled oneshot thread_mask bit while it's still in use by
* its previous owner.
@@ -1612,11 +1619,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
/*
* Drop bus_lock here so the changes which were done in the chip
* callbacks above are synced out to the irq chips which hang
- * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+ * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
*
* Aside of that the bus_lock can also be taken from the threaded
* handler in irq_finalize_oneshot() which results in a deadlock
- * because synchronize_irq() would wait forever for the thread to
+ * because kthread_stop() would wait forever for the thread to
* complete, which is blocked on the bus lock.
*
* The still held desc->request_mutex() protects against a
@@ -1628,7 +1635,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU: */
- synchronize_irq(irq);
+ synchronize_hardirq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
/*
@@ -1646,6 +1653,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
}
#endif
+ /*
+ * The action has already been removed above, but the thread writes
+ * its oneshot mask bit when it completes. Though request_mutex is
+ * held across this which prevents __setup_irq() from handing out
+ * the same bit to a newly requested action.
+ */
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);