2021-12-06 22:52:27

by Thomas Gleixner

Subject: [patch V2 10/31] PCI/MSI: Use msi_on_each_desc()

Use the new iterator functions which pave the way for dynamically extending
MSI-X vectors.
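
For illustration (not part of the patch), here is a minimal sketch of how the
new filtered iterator replaces the old open-coded check, modelled on the
arch_teardown_msi_irqs() hunk below. The helper names teardown_old() and
teardown_new() are made up for this example; the filters (MSI_DESC_ALL,
MSI_DESC_ASSOCIATED, MSI_DESC_NOTASSOCIATED) come from the per-device MSI
descriptor infrastructure introduced earlier in this series.

/* Old pattern: walk every descriptor and filter on desc->irq by hand. */
static void teardown_old(struct pci_dev *pdev)
{
	struct msi_desc *desc;
	int i;

	for_each_pci_msi_entry(desc, pdev) {
		if (desc->irq) {
			for (i = 0; i < desc->nvec_used; i++)
				arch_teardown_msi_irq(desc->irq + i);
		}
	}
}

/*
 * New pattern: the iterator operates on the generic struct device and takes
 * a filter. MSI_DESC_ASSOCIATED yields only descriptors that already have a
 * Linux interrupt number assigned (desc->irq != 0), MSI_DESC_NOTASSOCIATED
 * yields the remaining ones, and MSI_DESC_ALL yields both.
 */
static void teardown_new(struct pci_dev *pdev)
{
	struct msi_desc *desc;
	int i;

	msi_for_each_desc(desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
		for (i = 0; i < desc->nvec_used; i++)
			arch_teardown_msi_irq(desc->irq + i);
	}
}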

Signed-off-by: Thomas Gleixner <[email protected]>
---
 drivers/pci/msi/irqdomain.c |  4 ++--
 drivers/pci/msi/legacy.c    | 19 ++++++++-----------
 drivers/pci/msi/msi.c       | 30 ++++++++++++++----------------
3 files changed, 24 insertions(+), 29 deletions(-)

--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -83,7 +83,7 @@ static int pci_msi_domain_check_cap(stru
struct msi_domain_info *info,
struct device *dev)
{
- struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+ struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL);

/* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) &&
@@ -98,7 +98,7 @@ static int pci_msi_domain_check_cap(stru
unsigned int idx = 0;

/* Check for gaps in the entry indices */
- for_each_msi_entry(desc, dev) {
+ msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
if (desc->msi_index != idx++)
return -ENOTSUPP;
}
--- a/drivers/pci/msi/legacy.c
+++ b/drivers/pci/msi/legacy.c
@@ -28,7 +28,7 @@ int __weak arch_setup_msi_irqs(struct pc
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;

- for_each_pci_msi_entry(desc, dev) {
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
ret = arch_setup_msi_irq(dev, desc);
if (ret)
return ret < 0 ? ret : -ENOSPC;
@@ -42,27 +42,24 @@ void __weak arch_teardown_msi_irqs(struc
struct msi_desc *desc;
int i;

- for_each_pci_msi_entry(desc, dev) {
- if (desc->irq) {
- for (i = 0; i < desc->nvec_used; i++)
- arch_teardown_msi_irq(desc->irq + i);
- }
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+ for (i = 0; i < desc->nvec_used; i++)
+ arch_teardown_msi_irq(desc->irq + i);
}
}

static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
int avail = 0;

if (type != PCI_CAP_ID_MSIX || ret >= 0)
return ret;

/* Scan the MSI descriptors for successfully allocated ones. */
- for_each_pci_msi_entry(entry, dev) {
- if (entry->irq != 0)
- avail++;
- }
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+ avail++;
+
return avail ? avail : ret;
}

--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -299,7 +299,6 @@ static void __pci_restore_msix_state(str

if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

/* route the table */
pci_intx_for_msi(dev, 0);
@@ -309,7 +308,7 @@ static void __pci_restore_msix_state(str
write_msg = arch_restore_msi_irqs(dev);

msi_lock_descs(&dev->dev);
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (write_msg)
__pci_write_msi_msg(entry, &entry->msg);
pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
@@ -378,14 +377,14 @@ static int msi_verify_entries(struct pci
if (!dev->no_64bit_msi)
return 0;

- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (entry->msg.address_hi) {
pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
entry->msg.address_hi, entry->msg.address_lo);
- return -EIO;
+ break;
}
}
- return 0;
+ return !entry ? 0 : -EIO;
}

/**
@@ -418,7 +417,7 @@ static int msi_capability_init(struct pc
goto unlock;

/* All MSIs are unmasked by default; mask them all */
- entry = first_pci_msi_entry(dev);
+ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));

/* Configure MSI capability structure */
@@ -508,11 +507,11 @@ static int msix_setup_msi_descs(struct p

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;

if (entries) {
- for_each_pci_msi_entry(entry, dev) {
- entries->vector = entry->irq;
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+ entries->vector = desc->irq;
entries++;
}
}
@@ -705,15 +704,14 @@ static void pci_msi_shutdown(struct pci_
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;

- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
- desc = first_pci_msi_entry(dev);
-
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;

/* Return the device with MSI unmasked as initial states */
- pci_msi_unmask(desc, msi_multi_mask(desc));
+ desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ if (!WARN_ON_ONCE(!desc))
+ pci_msi_unmask(desc, msi_multi_mask(desc));

/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->pci.msi_attrib.default_irq;
@@ -789,7 +787,7 @@ static int __pci_enable_msix(struct pci_

static void pci_msix_shutdown(struct pci_dev *dev)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;

if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
@@ -800,8 +798,8 @@ static void pci_msix_shutdown(struct pci
}

/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev)
- pci_msix_mask(entry);
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+ pci_msix_mask(desc);

pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);



2021-12-07 21:07:54

by Bjorn Helgaas

Subject: Re: [patch V2 10/31] PCI/MSI: Use msi_on_each_desc()

On Mon, Dec 06, 2021 at 11:51:18PM +0100, Thomas Gleixner wrote:
> Use the new iterator functions which pave the way for dynamically extending
> MSI-X vectors.
>
> Signed-off-by: Thomas Gleixner <[email protected]>

Acked-by: Bjorn Helgaas <[email protected]>

Subject: [tip: irq/msi] PCI/MSI: Use msi_on_each_desc()

The following commit has been merged into the irq/msi branch of tip:

Commit-ID: ae24e28fef14687a26f98050e64153b8e4fee7c7
Gitweb: https://git.kernel.org/tip/ae24e28fef14687a26f98050e64153b8e4fee7c7
Author: Thomas Gleixner <[email protected]>
AuthorDate: Mon, 06 Dec 2021 23:51:18 +01:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Thu, 16 Dec 2021 22:22:18 +01:00

PCI/MSI: Use msi_on_each_desc()

Use the new iterator functions which pave the way for dynamically extending
MSI-X vectors.

Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Michael Kelley <[email protected]>
Tested-by: Nishanth Menon <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Acked-by: Bjorn Helgaas <[email protected]>
Link: https://lore.kernel.org/r/[email protected]

---
 drivers/pci/msi/irqdomain.c |  4 ++--
 drivers/pci/msi/legacy.c    | 19 ++++++++-----------
 drivers/pci/msi/msi.c       | 30 ++++++++++++++----------------
3 files changed, 24 insertions(+), 29 deletions(-)

diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 3aab617..0d63541 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -83,7 +83,7 @@ static int pci_msi_domain_check_cap(struct irq_domain *domain,
struct msi_domain_info *info,
struct device *dev)
{
- struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+ struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL);

/* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) &&
@@ -98,7 +98,7 @@ static int pci_msi_domain_check_cap(struct irq_domain *domain,
unsigned int idx = 0;

/* Check for gaps in the entry indices */
- for_each_msi_entry(desc, dev) {
+ msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
if (desc->msi_index != idx++)
return -ENOTSUPP;
}
diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c
index 91c20a3..cdbb468 100644
--- a/drivers/pci/msi/legacy.c
+++ b/drivers/pci/msi/legacy.c
@@ -28,7 +28,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;

- for_each_pci_msi_entry(desc, dev) {
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
ret = arch_setup_msi_irq(dev, desc);
if (ret)
return ret < 0 ? ret : -ENOSPC;
@@ -42,27 +42,24 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
struct msi_desc *desc;
int i;

- for_each_pci_msi_entry(desc, dev) {
- if (desc->irq) {
- for (i = 0; i < desc->nvec_used; i++)
- arch_teardown_msi_irq(desc->irq + i);
- }
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+ for (i = 0; i < desc->nvec_used; i++)
+ arch_teardown_msi_irq(desc->irq + i);
}
}

static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
int avail = 0;

if (type != PCI_CAP_ID_MSIX || ret >= 0)
return ret;

/* Scan the MSI descriptors for successfully allocated ones. */
- for_each_pci_msi_entry(entry, dev) {
- if (entry->irq != 0)
- avail++;
- }
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+ avail++;
+
return avail ? avail : ret;
}

diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 95db9c3..7180241 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -297,7 +297,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev)

if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

/* route the table */
pci_intx_for_msi(dev, 0);
@@ -307,7 +306,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);

msi_lock_descs(&dev->dev);
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (write_msg)
__pci_write_msi_msg(entry, &entry->msg);
pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
@@ -406,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev)
if (!dev->no_64bit_msi)
return 0;

- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (entry->msg.address_hi) {
pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
entry->msg.address_hi, entry->msg.address_lo);
- return -EIO;
+ break;
}
}
- return 0;
+ return !entry ? 0 : -EIO;
}

/**
@@ -451,7 +450,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto fail;

/* All MSIs are unmasked by default; mask them all */
- entry = first_pci_msi_entry(dev);
+ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));

/* Configure MSI capability structure */
@@ -541,11 +540,11 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;

if (entries) {
- for_each_pci_msi_entry(entry, dev) {
- entries->vector = entry->irq;
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+ entries->vector = desc->irq;
entries++;
}
}
@@ -747,15 +746,14 @@ static void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;

- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
- desc = first_pci_msi_entry(dev);
-
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;

/* Return the device with MSI unmasked as initial states */
- pci_msi_unmask(desc, msi_multi_mask(desc));
+ desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ if (!WARN_ON_ONCE(!desc))
+ pci_msi_unmask(desc, msi_multi_mask(desc));

/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->pci.msi_attrib.default_irq;
@@ -831,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,

static void pci_msix_shutdown(struct pci_dev *dev)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;

if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
@@ -842,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}

/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev)
- pci_msix_mask(entry);
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+ pci_msix_mask(desc);

pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);