2021-12-06 22:52:19

by Thomas Gleixner

Subject: [patch V2 08/31] PCI/MSI: Use msi_add_msi_desc()

Simplify the allocation of MSI descriptors by using msi_add_msi_desc()
which moves the storage handling to core code and prepares for dynamic
extension of the MSI-X vector space.

Signed-off-by: Thomas Gleixner <[email protected]>
---
drivers/pci/msi/msi.c | 122 ++++++++++++++++++++++++--------------------------
1 file changed, 59 insertions(+), 63 deletions(-)
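
To make the pattern easier to follow before reading the hunks, here is a condensed sketch of the MSI side (example_setup_msi_desc() is a hypothetical stand-in; the capability reads, attribute setup and error handling done by the real msi_setup_msi_desc() below are elided):

/* Illustrative sketch only; the real code is msi_setup_msi_desc() below. */
static int example_setup_msi_desc(struct pci_dev *dev, int nvec,
				  struct irq_affinity_desc *masks)
{
	struct msi_desc desc;

	/* Fill a template on the stack instead of allocating an entry. */
	memset(&desc, 0, sizeof(desc));
	desc.nvec_used = nvec;	/* one descriptor covers all MSI vectors */
	desc.affinity = masks;	/* optional affinity descriptors */
	/* ... msi_attrib and mask_pos are filled from the MSI capability ... */

	/*
	 * The MSI core copies the template into storage it manages itself,
	 * which replaces the old alloc_msi_entry() + list_add_tail() sequence.
	 */
	return msi_add_msi_desc(&dev->dev, &desc);
}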

--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -340,45 +340,51 @@ void pci_restore_msi_state(struct pci_de
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+ struct irq_affinity_desc *masks)
{
- struct msi_desc *entry;
+ struct msi_desc desc;
unsigned long prop;
u16 control;
+ int ret;

/* MSI Entry Initialization */
- entry = alloc_msi_entry(&dev->dev, nvec, masks);
- if (!entry)
- return NULL;
+ memset(&desc, 0, sizeof(desc));

pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
+ /* Respect XEN's mask disabling */
+ if (pci_msi_ignore_mask)
+ control &= ~PCI_MSI_FLAGS_MASKBIT;

- entry->pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
- entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !!(control & PCI_MSI_FLAGS_MASKBIT);
- entry->pci.msi_attrib.default_irq = dev->irq;
- entry->pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
- entry->pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.nvec_used = nvec;
+ desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.affinity = masks;

if (control & PCI_MSI_FLAGS_64BIT)
- entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
else
- entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

/* Save the initial mask status */
- if (entry->pci.msi_attrib.can_mask)
- pci_read_config_dword(dev, entry->pci.mask_pos, &entry->pci.msi_mask);
+ if (desc.pci.msi_attrib.can_mask)
+ pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);

- prop = MSI_PROP_PCI_MSI;
- if (entry->pci.msi_attrib.is_64)
- prop |= MSI_PROP_64BIT;
- msi_device_set_properties(&dev->dev, prop);
+ ret = msi_add_msi_desc(&dev->dev, &desc);
+ if (!ret) {
+ prop = MSI_PROP_PCI_MSI;
+ if (desc.pci.msi_attrib.is_64)
+ prop |= MSI_PROP_64BIT;
+ msi_device_set_properties(&dev->dev, prop);
+ }

- return entry;
+ return ret;
}

static int msi_verify_entries(struct pci_dev *dev)
@@ -423,17 +429,14 @@ static int msi_capability_init(struct pc
masks = irq_create_affinity_masks(nvec, affd);

msi_lock_descs(&dev->dev);
- entry = msi_setup_entry(dev, nvec, masks);
- if (!entry) {
- ret = -ENOMEM;
+ ret = msi_setup_msi_desc(dev, nvec, masks);
+ if (ret)
goto unlock;
- }

/* All MSIs are unmasked by default; mask them all */
+ entry = first_pci_msi_entry(dev);
pci_msi_mask(entry, msi_multi_mask(entry));

- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret)
@@ -482,49 +485,40 @@ static void __iomem *msix_map_region(str
return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
- struct msix_entry *entries, int nvec,
- struct irq_affinity_desc *masks)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity_desc *masks)
{
- int i, vec_count = pci_msix_vec_count(dev);
+ int ret = 0, i, vec_count = pci_msix_vec_count(dev);
struct irq_affinity_desc *curmsk;
- struct msi_desc *entry;
+ struct msi_desc desc;
void __iomem *addr;

- for (i = 0, curmsk = masks; i < nvec; i++) {
- entry = alloc_msi_entry(&dev->dev, 1, curmsk);
- if (!entry) {
- /* No enough memory. Don't try again */
- return -ENOMEM;
- }
-
- entry->pci.msi_attrib.is_msix = 1;
- entry->pci.msi_attrib.is_64 = 1;
-
- if (entries)
- entry->msi_index = entries[i].entry;
- else
- entry->msi_index = i;
-
- entry->pci.msi_attrib.is_virtual = entry->msi_index >= vec_count;
-
- entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !entry->pci.msi_attrib.is_virtual;
-
- entry->pci.msi_attrib.default_irq = dev->irq;
- entry->pci.mask_base = base;
+ memset(&desc, 0, sizeof(desc));

- if (entry->pci.msi_attrib.can_mask) {
- addr = pci_msix_desc_addr(entry);
- entry->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ desc.nvec_used = 1;
+ desc.pci.msi_attrib.is_msix = 1;
+ desc.pci.msi_attrib.is_64 = 1;
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.mask_base = base;
+
+ for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+ desc.msi_index = entries ? entries[i].entry : i;
+ desc.affinity = masks ? curmsk : NULL;
+ desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+ desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !desc.pci.msi_attrib.is_virtual;
+
+ if (!desc.pci.msi_attrib.can_mask) {
+ addr = pci_msix_desc_addr(&desc);
+ desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}

- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
- if (masks)
- curmsk++;
+ ret = msi_add_msi_desc(&dev->dev, &desc);
+ if (ret)
+ break;
}
- msi_device_set_properties(&dev->dev, MSI_PROP_PCI_MSIX | MSI_PROP_64BIT);
- return 0;
+ return ret;
}

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -562,10 +556,12 @@ static int msix_setup_interrupts(struct
masks = irq_create_affinity_masks(nvec, affd);

msi_lock_descs(&dev->dev);
- ret = msix_setup_entries(dev, base, entries, nvec, masks);
+ ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
if (ret)
goto out_free;

+ msi_device_set_properties(&dev->dev, MSI_PROP_PCI_MSIX | MSI_PROP_64BIT);
+
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
goto out_free;



2021-12-07 21:07:27

by Bjorn Helgaas

Subject: Re: [patch V2 08/31] PCI/MSI: Use msi_add_msi_desc()

On Mon, Dec 06, 2021 at 11:51:15PM +0100, Thomas Gleixner wrote:
> Simplify the allocation of MSI descriptors by using msi_add_msi_desc()
> which moves the storage handling to core code and prepares for dynamic
> extension of the MSI-X vector space.
>
> Signed-off-by: Thomas Gleixner <[email protected]>

Acked-by: Bjorn Helgaas <[email protected]>


Subject: [tip: irq/msi] PCI/MSI: Use msi_add_msi_desc()

The following commit has been merged into the irq/msi branch of tip:

Commit-ID: 71020a3c0dff4a00d96922a4a95a067f524a7dcb
Gitweb: https://git.kernel.org/tip/71020a3c0dff4a00d96922a4a95a067f524a7dcb
Author: Thomas Gleixner <[email protected]>
AuthorDate: Mon, 06 Dec 2021 23:51:15 +01:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Thu, 16 Dec 2021 22:22:17 +01:00

PCI/MSI: Use msi_add_msi_desc()

Simplify the allocation of MSI descriptors by using msi_add_msi_desc()
which moves the storage handling to core code and prepares for dynamic
extension of the MSI-X vector space.

Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Michael Kelley <[email protected]>
Tested-by: Nishanth Menon <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Acked-by: Bjorn Helgaas <[email protected]>
Link: https://lore.kernel.org/r/[email protected]

---
drivers/pci/msi/msi.c | 104 ++++++++++++++++++-----------------------
1 file changed, 47 insertions(+), 57 deletions(-)
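
The MSI-X side of the merged version follows the same idea, reusing one on-stack template per vector; a condensed sketch (example_setup_msix_descs() is a hypothetical stand-in; the table mapping, mask handling and entries[] index remapping done by the real msix_setup_msi_descs() below are elided):

/* Illustrative sketch only; the real code is msix_setup_msi_descs() below. */
static int example_setup_msix_descs(struct pci_dev *dev, int nvec,
				    struct irq_affinity_desc *masks)
{
	struct msi_desc desc;
	int i, ret = 0;

	/* The fields shared by all vectors are set once on the template. */
	memset(&desc, 0, sizeof(desc));
	desc.nvec_used = 1;			/* one descriptor per MSI-X vector */
	desc.pci.msi_attrib.is_msix = 1;

	for (i = 0; i < nvec; i++) {
		/* Only the per-vector fields change between iterations. */
		desc.msi_index = i;
		desc.affinity = masks ? &masks[i] : NULL;

		/* msi_add_msi_desc() copies the template, so reuse is safe. */
		ret = msi_add_msi_desc(&dev->dev, &desc);
		if (ret)
			break;
	}
	return ret;
}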

diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c9a03f9..4ee47ee 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -376,40 +376,41 @@ static int pci_setup_msi_context(struct pci_dev *dev)
return ret;
}

-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+ struct irq_affinity_desc *masks)
{
- struct msi_desc *entry;
+ struct msi_desc desc;
u16 control;

/* MSI Entry Initialization */
- entry = alloc_msi_entry(&dev->dev, nvec, masks);
- if (!entry)
- return NULL;
+ memset(&desc, 0, sizeof(desc));

pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
+ /* Respect XEN's mask disabling */
+ if (pci_msi_ignore_mask)
+ control &= ~PCI_MSI_FLAGS_MASKBIT;

- entry->pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
- entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !!(control & PCI_MSI_FLAGS_MASKBIT);
- entry->pci.msi_attrib.default_irq = dev->irq;
- entry->pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
- entry->pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.nvec_used = nvec;
+ desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.affinity = masks;

if (control & PCI_MSI_FLAGS_64BIT)
- entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
else
- entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

/* Save the initial mask status */
- if (entry->pci.msi_attrib.can_mask)
- pci_read_config_dword(dev, entry->pci.mask_pos, &entry->pci.msi_mask);
-
+ if (desc.pci.msi_attrib.can_mask)
+ pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);

- return entry;
+ return msi_add_msi_desc(&dev->dev, &desc);
}

static int msi_verify_entries(struct pci_dev *dev)
@@ -459,17 +460,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
masks = irq_create_affinity_masks(nvec, affd);

msi_lock_descs(&dev->dev);
- entry = msi_setup_entry(dev, nvec, masks);
- if (!entry) {
- ret = -ENOMEM;
+ ret = msi_setup_msi_desc(dev, nvec, masks);
+ if (ret)
goto fail;
- }

/* All MSIs are unmasked by default; mask them all */
+ entry = first_pci_msi_entry(dev);
pci_msi_mask(entry, msi_multi_mask(entry));

- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret)
@@ -519,48 +517,40 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
- struct msix_entry *entries, int nvec,
- struct irq_affinity_desc *masks)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity_desc *masks)
{
- int i, vec_count = pci_msix_vec_count(dev);
+ int ret = 0, i, vec_count = pci_msix_vec_count(dev);
struct irq_affinity_desc *curmsk;
- struct msi_desc *entry;
+ struct msi_desc desc;
void __iomem *addr;

- for (i = 0, curmsk = masks; i < nvec; i++) {
- entry = alloc_msi_entry(&dev->dev, 1, curmsk);
- if (!entry) {
- /* No enough memory. Don't try again */
- return -ENOMEM;
- }
-
- entry->pci.msi_attrib.is_msix = 1;
- entry->pci.msi_attrib.is_64 = 1;
+ memset(&desc, 0, sizeof(desc));

- if (entries)
- entry->msi_index = entries[i].entry;
- else
- entry->msi_index = i;
+ desc.nvec_used = 1;
+ desc.pci.msi_attrib.is_msix = 1;
+ desc.pci.msi_attrib.is_64 = 1;
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.mask_base = base;

- entry->pci.msi_attrib.is_virtual = entry->msi_index >= vec_count;
+ for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+ desc.msi_index = entries ? entries[i].entry : i;
+ desc.affinity = masks ? curmsk : NULL;
+ desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+ desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !desc.pci.msi_attrib.is_virtual;

- entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !entry->pci.msi_attrib.is_virtual;
-
- entry->pci.msi_attrib.default_irq = dev->irq;
- entry->pci.mask_base = base;
-
- if (entry->pci.msi_attrib.can_mask) {
- addr = pci_msix_desc_addr(entry);
- entry->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ if (!desc.pci.msi_attrib.can_mask) {
+ addr = pci_msix_desc_addr(&desc);
+ desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}

- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
- if (masks)
- curmsk++;
+ ret = msi_add_msi_desc(&dev->dev, &desc);
+ if (ret)
+ break;
}
- return 0;
+ return ret;
}

static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -598,7 +588,7 @@ static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
masks = irq_create_affinity_masks(nvec, affd);

msi_lock_descs(&dev->dev);
- ret = msix_setup_entries(dev, base, entries, nvec, masks);
+ ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
if (ret)
goto out_free;