2021-03-28 20:03:09

by Waiman Long

[permalink] [raw]
Subject: [PATCH] x86/apic/vector: Move pr_warn() outside of vector_lock

It was found that the following circular locking dependency warning
could happen in some systems:

[ 218.097878] ======================================================
[ 218.097879] WARNING: possible circular locking dependency detected
[ 218.097880] 4.18.0-228.el8.x86_64+debug #1 Not tainted
[ 218.097881] ------------------------------------------------------
[ 218.097882] systemd/1 is trying to acquire lock:
[ 218.097883] ffffffff84c27920 (console_owner){-.-.}, at: console_unlock+0x3fb/0x9f0
[ 218.097886]
[ 218.097887] but task is already holding lock:
[ 218.097888] ffffffff84afca78 (vector_lock){-.-.}, at: x86_vector_activate+0xca/0xab0
[ 218.097891]
[ 218.097892] which lock already depends on the new lock.
:
[ 218.097966] other info that might help us debug this:
[ 218.097967]
[ 218.097967] Chain exists of:
[ 218.097968] console_oc_lock_class --> vector_lock
[ 218.097972]
[ 218.097973] Possible unsafe locking scenario:
[ 218.097973]
[ 218.097974] CPU0 CPU1
[ 218.097975] ---- ----
[ 218.097975] lock(vector_lock);
[ 218.097977] lock(&irq_desc_lock_class);
[ 218.097980] lock(vector_lock);
[ 218.097981] lock(console_owner);
[ 218.097983]
[ 218.097984] *** DEADLOCK ***

This lockdep warning was caused by the printing of the warning message:

[ 218.095152] irq 3: Affinity broken due to vector space exhaustion.

It looks like this warning message is more common than
the other warnings in arch/x86/kernel/apic/vector.c. To avoid this
potential deadlock scenario, this patch moves all the pr_warn() calls
in the vector.c file outside of the vector_lock critical sections.

Signed-off-by: Waiman Long <[email protected]>
---
arch/x86/kernel/apic/vector.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 3c9c7492252f..948ec592ef61 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -385,7 +385,8 @@ static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
raw_spin_unlock_irqrestore(&vector_lock, flags);
}

-static int activate_reserved(struct irq_data *irqd)
+static int activate_reserved(struct irq_data *irqd, unsigned long flags,
+ bool *unlocked)
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
int ret;
@@ -410,6 +411,8 @@ static int activate_reserved(struct irq_data *irqd)
*/
if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
irq_data_get_affinity_mask(irqd))) {
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ *unlocked = true;
pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
irqd->irq);
}
@@ -446,6 +449,7 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
unsigned long flags;
+ bool unlocked = false;
int ret = 0;

trace_vector_activate(irqd->irq, apicd->is_managed,
@@ -459,8 +463,9 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
else if (apicd->is_managed)
ret = activate_managed(irqd);
else if (apicd->has_reserved)
- ret = activate_reserved(irqd);
- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ ret = activate_reserved(irqd, flags, &unlocked);
+ if (!unlocked)
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
return ret;
}

@@ -989,6 +994,7 @@ void irq_force_complete_move(struct irq_desc *desc)
struct apic_chip_data *apicd;
struct irq_data *irqd;
unsigned int vector;
+ bool warn_move_in_progress = false;

/*
* The function is called for all descriptors regardless of which
@@ -1064,12 +1070,14 @@ void irq_force_complete_move(struct irq_desc *desc)
* so we have the necessary information when a problem in that
* area arises.
*/
- pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
- irqd->irq, vector);
+ warn_move_in_progress = true;
}
free_moved_vector(apicd);
unlock:
raw_spin_unlock(&vector_lock);
+ if (warn_move_in_progress)
+ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
+ irqd->irq, vector);
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -1079,25 +1087,24 @@ void irq_force_complete_move(struct irq_desc *desc)
*/
int lapic_can_unplug_cpu(void)
{
- unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+ unsigned int rsvd = 0, avl, tomove, cpu = smp_processor_id();
int ret = 0;

raw_spin_lock(&vector_lock);
tomove = irq_matrix_allocated(vector_matrix);
avl = irq_matrix_available(vector_matrix, true);
+ if (avl < tomove)
+ ret = -ENOSPC;
+ else
+ rsvd = irq_matrix_reserved(vector_matrix);
+ raw_spin_unlock(&vector_lock);
if (avl < tomove) {
pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
cpu, tomove, avl);
- ret = -ENOSPC;
- goto out;
- }
- rsvd = irq_matrix_reserved(vector_matrix);
- if (avl < rsvd) {
+ } else if (avl < rsvd) {
pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
rsvd, avl);
}
-out:
- raw_spin_unlock(&vector_lock);
return ret;
}
#endif /* HOTPLUG_CPU */
--
2.18.1


2021-03-28 22:06:04

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [PATCH] x86/apic/vector: Move pr_warn() outside of vector_lock

Waiman,

On Sun, Mar 28 2021 at 15:58, Waiman Long wrote:
> It was found that the following circular locking dependency warning
> could happen in some systems:
>
> [ 218.097878] ======================================================
> [ 218.097879] WARNING: possible circular locking dependency detected
> [ 218.097880] 4.18.0-228.el8.x86_64+debug #1 Not tainted
> [ 218.097881] ------------------------------------------------------
> [ 218.097882] systemd/1 is trying to acquire lock:
> [ 218.097883] ffffffff84c27920 (console_owner){-.-.}, at: console_unlock+0x3fb/0x9f0
> [ 218.097886]
> [ 218.097887] but task is already holding lock:
> [ 218.097888] ffffffff84afca78 (vector_lock){-.-.}, at: x86_vector_activate+0xca/0xab0
> [ 218.097891]
> [ 218.097892] which lock already depends on the new lock.
> :
> [ 218.097966] other info that might help us debug this:
> [ 218.097967]
> [ 218.097967] Chain exists of:
> [ 218.097968] console_oc_lock_class --> vector_lock
> [ 218.097972]
> [ 218.097973] Possible unsafe locking scenario:
> [ 218.097973]
> [ 218.097974] CPU0 CPU1
> [ 218.097975] ---- ----
> [ 218.097975] lock(vector_lock);
> [ 218.097977] lock(&irq_desc_lock_class);
> [ 218.097980] lock(vector_lock);
> [ 218.097981] lock(console_owner);
> [ 218.097983]
> [ 218.097984] *** DEADLOCK ***

can you please post the full lockdep output?

> This lockdep warning was causing by printing of the warning message:
>
> [ 218.095152] irq 3: Affinity broken due to vector space exhaustion.
>
> It looks that this warning message is relatively more common than
> the other warnings in arch/x86/kernel/apic/vector.c. To avoid this
> potential deadlock scenario, this patch moves all the pr_warn() calls
> in the vector.c file outside of the vector_lock critical sections.

Definitely not.

> -static int activate_reserved(struct irq_data *irqd)
> +static int activate_reserved(struct irq_data *irqd, unsigned long flags,
> + bool *unlocked)
> {
> struct apic_chip_data *apicd = apic_chip_data(irqd);
> int ret;
> @@ -410,6 +411,8 @@ static int activate_reserved(struct irq_data *irqd)
> */
> if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
> irq_data_get_affinity_mask(irqd))) {
> + raw_spin_unlock_irqrestore(&vector_lock, flags);
> + *unlocked = true;

What?

> pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
> irqd->irq);
> }
> @@ -446,6 +449,7 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
> {
> struct apic_chip_data *apicd = apic_chip_data(irqd);
> unsigned long flags;
> + bool unlocked = false;
> int ret = 0;
>
> trace_vector_activate(irqd->irq, apicd->is_managed,
> @@ -459,8 +463,9 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
> else if (apicd->is_managed)
> ret = activate_managed(irqd);
> else if (apicd->has_reserved)
> - ret = activate_reserved(irqd);
> - raw_spin_unlock_irqrestore(&vector_lock, flags);
> + ret = activate_reserved(irqd, flags, &unlocked);
> + if (!unlocked)
> + raw_spin_unlock_irqrestore(&vector_lock, flags);

Even moar what?

> return ret;
> }

This turns that code into complete unreadable gunk. No way.

Thanks,

tglx

2021-03-29 00:50:14

by Waiman Long

[permalink] [raw]
Subject: Re: [PATCH] x86/apic/vector: Move pr_warn() outside of vector_lock

On 3/28/21 6:04 PM, Thomas Gleixner wrote:
> Waiman,
>
> On Sun, Mar 28 2021 at 15:58, Waiman Long wrote:
>> It was found that the following circular locking dependency warning
>> could happen in some systems:
>>
>> [ 218.097878] ======================================================
>> [ 218.097879] WARNING: possible circular locking dependency detected
>> [ 218.097880] 4.18.0-228.el8.x86_64+debug #1 Not tainted
>> [ 218.097881] ------------------------------------------------------
>> [ 218.097882] systemd/1 is trying to acquire lock:
>> [ 218.097883] ffffffff84c27920 (console_owner){-.-.}, at: console_unlock+0x3fb/0x9f0
>> [ 218.097886]
>> [ 218.097887] but task is already holding lock:
>> [ 218.097888] ffffffff84afca78 (vector_lock){-.-.}, at: x86_vector_activate+0xca/0xab0
>> [ 218.097891]
>> [ 218.097892] which lock already depends on the new lock.
>> :
>> [ 218.097966] other info that might help us debug this:
>> [ 218.097967]
>> [ 218.097967] Chain exists of:
>> [ 218.097968] console_oc_lock_class --> vector_lock
>> [ 218.097972]
>> [ 218.097973] Possible unsafe locking scenario:
>> [ 218.097973]
>> [ 218.097974] CPU0 CPU1
>> [ 218.097975] ---- ----
>> [ 218.097975] lock(vector_lock);
>> [ 218.097977] lock(&irq_desc_lock_class);
>> [ 218.097980] lock(vector_lock);
>> [ 218.097981] lock(console_owner);
>> [ 218.097983]
>> [ 218.097984] *** DEADLOCK ***
> can you please post the full lockdep output?

Will do.


>> This lockdep warning was causing by printing of the warning message:
>>
>> [ 218.095152] irq 3: Affinity broken due to vector space exhaustion.
>>
>> It looks that this warning message is relatively more common than
>> the other warnings in arch/x86/kernel/apic/vector.c. To avoid this
>> potential deadlock scenario, this patch moves all the pr_warn() calls
>> in the vector.c file outside of the vector_lock critical sections.
> Definitely not.
>
>> -static int activate_reserved(struct irq_data *irqd)
>> +static int activate_reserved(struct irq_data *irqd, unsigned long flags,
>> + bool *unlocked)
>> {
>> struct apic_chip_data *apicd = apic_chip_data(irqd);
>> int ret;
>> @@ -410,6 +411,8 @@ static int activate_reserved(struct irq_data *irqd)
>> */
>> if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
>> irq_data_get_affinity_mask(irqd))) {
>> + raw_spin_unlock_irqrestore(&vector_lock, flags);
>> + *unlocked = true;
> What?
>
>> pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
>> irqd->irq);
>> }
>> @@ -446,6 +449,7 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
>> {
>> struct apic_chip_data *apicd = apic_chip_data(irqd);
>> unsigned long flags;
>> + bool unlocked = false;
>> int ret = 0;
>>
>> trace_vector_activate(irqd->irq, apicd->is_managed,
>> @@ -459,8 +463,9 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
>> else if (apicd->is_managed)
>> ret = activate_managed(irqd);
>> else if (apicd->has_reserved)
>> - ret = activate_reserved(irqd);
>> - raw_spin_unlock_irqrestore(&vector_lock, flags);
>> + ret = activate_reserved(irqd, flags, &unlocked);
>> + if (!unlocked)
>> + raw_spin_unlock_irqrestore(&vector_lock, flags);
> Even moar what?
>
>> return ret;
>> }
> This turns that code into complete unreadable gunk. No way.

I am sorry that this part of the patch is sloppy. I will revise it to
make it better.

Cheers,
Longman