2012-11-19 15:01:26

by Alexander Gordeev

Subject: [PATCH v4 -tip 0/3] x86, MSI, AHCI: Support multiple MSIs

Hi Jeff,

I am resending this series with changes based on Ingo's review of v3.

Patch #3 has a single functional change compared to the previous version
you acked - a fix for an invalid index spotted by Ingo (the interdiff hunk
is below). Could you please review the patch again and ack or comment?

@@ -1129,8 +1131,8 @@
out_free_all_irqs:
i = host->n_ports;
out_free_irqs:
- for (; i; i--)
- devm_free_irq(host->dev, irq + i - 1, host->ports[i]);
+ for (i--; i >= 0; i--)
+ devm_free_irq(host->dev, irq + i, host->ports[i]);

return rc;
}

Whole series compared to v3:

I had not noticed that the v3 2/5 patch "x86, MSI: Allocate as many
multiple IRQs as requested" is redundant, since the AHCI code
tries to allocate the maximum possible number of MSIs. As there are
no other users of this functionality, I removed the v3 2/5 patch for now;

v3 patch 3/5 "x86, MSI: Minor readability fixes" was merged into
v4 patch 1/3 "x86, MSI: Support multiple MSIs in presence of IRQ
remapping";

1/3: - racy can_create_irqs() check removed;
- multi-line comments fixed;
- loops 'for (; i; i--)' changed to 'for (i--; i >= 0; i--)';

v1 conditionally acked by Suresh
v2 addressed Yinghai's comments

2/3: v2 acked by Bjorn, no changes since

3/3: - ahci_host_activate() failure path fixed and tested;
- ahci_port_priv::lock field commented;
- multi-line comments fixed;

v3 acked by Jeff

Alexander Gordeev (3):
x86, MSI: Support multiple MSIs in presence of IRQ remapping
PCI, MSI: Enable multiple MSIs with pci_enable_msi_block_auto()
AHCI: Support multiple MSIs

Documentation/PCI/MSI-HOWTO.txt | 37 ++++++++-
arch/x86/kernel/apic/io_apic.c | 165 +++++++++++++++++++++++++++++++--------
drivers/ata/ahci.c | 93 +++++++++++++++++++++-
drivers/ata/ahci.h | 6 ++
drivers/ata/libahci.c | 118 ++++++++++++++++++++++++++--
drivers/pci/msi.c | 26 ++++++
include/linux/irq.h | 5 +
include/linux/pci.h | 7 ++
kernel/irq/chip.c | 30 +++++--
9 files changed, 432 insertions(+), 55 deletions(-)

--
1.7.7.6


--
Regards,
Alexander Gordeev
[email protected]


2012-11-19 15:02:14

by Alexander Gordeev

Subject: [PATCH v4 -tip 1/3] x86, MSI: Support multiple MSIs in presence of IRQ remapping

The MSI specification has several constraints compared with MSI-X, the
most notable of which is the inability to configure MSIs independently.
As a result, it is impossible to dispatch interrupts from different
queues to different CPUs. This largely devalues the support of multiple
MSIs in SMP systems.

Also, the need to allocate a contiguous block of vector numbers for
devices capable of multiple MSIs could put considerable pressure on the
x86 interrupt vector allocator and lead to fragmentation of the
interrupt vector space.

This patch overcomes both drawbacks in the presence of IRQ remapping and
lets devices take advantage of multiple queues and per-IRQ affinity
assignments.
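
For illustration only (this is not part of the patch), a hedged sketch of
what a multi-queue driver could do once consecutive IRQs are assigned. It
assumes pci_enable_msi_block_auto() from patch 2/3 of this series and the
existing irq_set_affinity_hint() helper; pdev and the distribution policy
are placeholders:

  /* Hypothetical driver fragment: spread per-queue IRQs across CPUs */
  unsigned int maxvec;
  int i, nvec;

  nvec = pci_enable_msi_block_auto(pdev, &maxvec);
  if (nvec < 0)
          return nvec;

  for (i = 0; i < nvec; i++)
          irq_set_affinity_hint(pdev->irq + i,
                                cpumask_of(i % num_online_cpus()));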

Signed-off-by: Alexander Gordeev <[email protected]>
---
arch/x86/kernel/apic/io_apic.c | 165 ++++++++++++++++++++++++++++++++--------
include/linux/irq.h | 5 +
kernel/irq/chip.c | 30 +++++--
3 files changed, 160 insertions(+), 40 deletions(-)

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b134f0b..2cdc1b9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -300,9 +300,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
return cfg;
}

-static int alloc_irq_from(unsigned int from, int node)
+static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
{
- return irq_alloc_desc_from(from, node);
+ return irq_alloc_descs_from(from, count, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -2994,37 +2994,58 @@ device_initcall(ioapic_init_ops);
/*
* Dynamic irq allocate and deallocation
*/
-unsigned int create_irq_nr(unsigned int from, int node)
+unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
{
- struct irq_cfg *cfg;
+ struct irq_cfg **cfg;
unsigned long flags;
- unsigned int ret = 0;
- int irq;
+ int irq, i;

if (from < nr_irqs_gsi)
from = nr_irqs_gsi;

- irq = alloc_irq_from(from, node);
- if (irq < 0)
- return 0;
- cfg = alloc_irq_cfg(irq, node);
- if (!cfg) {
- free_irq_at(irq, NULL);
+ cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
+ if (!cfg)
return 0;
+
+ irq = alloc_irqs_from(from, count, node);
+ if (irq < 0)
+ goto out_cfgs;
+
+ for (i = 0; i < count; i++) {
+ cfg[i] = alloc_irq_cfg(irq + i, node);
+ if (!cfg[i])
+ goto out_irqs;
}

raw_spin_lock_irqsave(&vector_lock, flags);
- if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
- ret = irq;
+ for (i = 0; i < count; i++)
+ if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
+ goto out_vecs;
raw_spin_unlock_irqrestore(&vector_lock, flags);

- if (ret) {
- irq_set_chip_data(irq, cfg);
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- } else {
- free_irq_at(irq, cfg);
+ for (i = 0; i < count; i++) {
+ irq_set_chip_data(irq + i, cfg[i]);
+ irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
}
- return ret;
+
+ kfree(cfg);
+ return irq;
+
+out_vecs:
+ for (i--; i >= 0; i--)
+ __clear_irq_vector(irq + i, cfg[i]);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+out_irqs:
+ for (i = 0; i < count; i++)
+ free_irq_at(irq + i, cfg[i]);
+out_cfgs:
+ kfree(cfg);
+ return 0;
+}
+
+unsigned int create_irq_nr(unsigned int from, int node)
+{
+ return __create_irqs(from, 1, node);
}

int create_irq(void)
@@ -3057,6 +3078,14 @@ void destroy_irq(unsigned int irq)
free_irq_at(irq, cfg);
}

+static inline void destroy_irqs(unsigned int irq, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ destroy_irq(irq + i);
+}
+
/*
* MSI message composition
*/
@@ -3083,7 +3112,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,

if (irq_remapped(cfg)) {
compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
- return err;
+ return 0;
}

if (x2apic_enabled())
@@ -3110,7 +3139,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
MSI_DATA_DELIVERY_LOWPRI) |
MSI_DATA_VECTOR(cfg->vector);

- return err;
+ return 0;
}

static int
@@ -3148,18 +3177,26 @@ static struct irq_chip msi_chip = {
.irq_retrigger = ioapic_retrigger_irq,
};

-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset)
{
struct irq_chip *chip = &msi_chip;
struct msi_msg msg;
+ unsigned int irq = irq_base + irq_offset;
int ret;

ret = msi_compose_msg(dev, irq, &msg, -1);
if (ret < 0)
return ret;

- irq_set_msi_desc(irq, msidesc);
- write_msi_msg(irq, &msg);
+ irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
+
+ /*
+ * MSI-X message is written per-IRQ, the offset is always 0.
+ * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
+ */
+ if (!irq_offset)
+ write_msi_msg(irq, &msg);

if (irq_remapped(irq_get_chip_data(irq))) {
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
@@ -3173,23 +3210,19 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
return 0;
}

-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+int setup_msix_irqs(struct pci_dev *dev, int nvec)
{
int node, ret, sub_handle, index = 0;
unsigned int irq, irq_want;
struct msi_desc *msidesc;

- /* x86 doesn't support multiple MSI yet */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
node = dev_to_node(&dev->dev);
irq_want = nr_irqs_gsi;
sub_handle = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = create_irq_nr(irq_want, node);
if (irq == 0)
- return -1;
+ return -ENOSPC;
irq_want = irq + 1;
if (!irq_remapping_enabled)
goto no_ir;
@@ -3211,7 +3244,7 @@ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error;
}
no_ir:
- ret = setup_msi_irq(dev, msidesc, irq);
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
if (ret < 0)
goto error;
sub_handle++;
@@ -3223,6 +3256,74 @@ error:
return ret;
}

+int setup_msi_irqs(struct pci_dev *dev, int nvec)
+{
+ int node, ret, sub_handle, index = 0;
+ unsigned int irq;
+ struct msi_desc *msidesc;
+
+ if (nvec > 1 && !irq_remapping_enabled)
+ return 1;
+
+ nvec = __roundup_pow_of_two(nvec);
+
+ WARN_ON(!list_is_singular(&dev->msi_list));
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ WARN_ON(msidesc->irq);
+ WARN_ON(msidesc->msi_attrib.multiple);
+
+ node = dev_to_node(&dev->dev);
+ irq = __create_irqs(nr_irqs_gsi, nvec, node);
+ if (irq == 0)
+ return -ENOSPC;
+
+ if (!irq_remapping_enabled) {
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
+ if (ret < 0)
+ goto error;
+ return 0;
+ }
+
+ msidesc->msi_attrib.multiple = ilog2(nvec);
+ for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
+ if (!sub_handle) {
+ index = msi_alloc_remapped_irq(dev, irq, nvec);
+ if (index < 0) {
+ ret = index;
+ goto error;
+ }
+ } else {
+ ret = msi_setup_remapped_irq(dev, irq + sub_handle,
+ index, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ return 0;
+
+error:
+ destroy_irqs(irq, nvec);
+
+ /*
+ * Restore altered MSI descriptor fields and prevent just destroyed
+ * IRQs from tearing down again in default_teardown_msi_irqs()
+ */
+ msidesc->irq = 0;
+ msidesc->msi_attrib.multiple = 0;
+
+ return ret;
+}
+
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ if (type == PCI_CAP_ID_MSI)
+ return setup_msi_irqs(dev, nvec);
+ return setup_msix_irqs(dev, nvec);
+}
+
void native_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 526f10a..90568cf 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -531,6 +531,8 @@ extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+ struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline struct irq_chip *irq_get_chip(unsigned int irq)
@@ -593,6 +595,9 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
#define irq_alloc_desc_from(from, node) \
irq_alloc_descs(-1, from, 1, node)

+#define irq_alloc_descs_from(from, cnt, node) \
+ irq_alloc_descs(-1, from, cnt, node)
+
void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);

diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3aca9f2..cbd97ce 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -90,27 +90,41 @@ int irq_set_handler_data(unsigned int irq, void *data)
EXPORT_SYMBOL(irq_set_handler_data);

/**
- * irq_set_msi_desc - set MSI descriptor data for an irq
- * @irq: Interrupt number
- * @entry: Pointer to MSI descriptor data
+ * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
+ * @irq_base: Interrupt number base
+ * @irq_offset: Interrupt number offset
+ * @entry: Pointer to MSI descriptor data
*
- * Set the MSI descriptor entry for an irq
+ * Set the MSI descriptor entry for an irq at offset
*/
-int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
+int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+ struct msi_desc *entry)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+ struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

if (!desc)
return -EINVAL;
desc->irq_data.msi_desc = entry;
- if (entry)
- entry->irq = irq;
+ if (entry && !irq_offset)
+ entry->irq = irq_base;
irq_put_desc_unlock(desc, flags);
return 0;
}

/**
+ * irq_set_msi_desc - set MSI descriptor data for an irq
+ * @irq: Interrupt number
+ * @entry: Pointer to MSI descriptor data
+ *
+ * Set the MSI descriptor entry for an irq
+ */
+int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
+{
+ return irq_set_msi_desc_off(irq, 0, entry);
+}
+
+/**
* irq_set_chip_data - set irq chip data for an irq
* @irq: Interrupt number
* @data: Pointer to chip specific data
--
1.7.7.6


--
Regards,
Alexander Gordeev
[email protected]

2012-11-19 15:03:09

by Alexander Gordeev

Subject: [PATCH v4 -tip 3/3] AHCI: Support multiple MSIs

Take advantage of the multiple MSIs implementation on x86: on systems with
IRQ remapping, AHCI ports are assigned not only separate MSI vectors but
also separate IRQs. As a result, interrupts generated by different ports
can be serviced on different CPUs rather than on a single one.

In cases when the number of allocated MSIs is less than requested, the
Sharing Last MSI mode is not used, regardless of whether it is implemented
in hardware. Instead, the driver assumes the advantage of multiple MSIs is
negated and falls back to single MSI mode, as if the MRSM bit were set
(some Intel chips implement this strategy anyway - the MRSM bit gets set
even if the number of allocated MSIs exceeds the number of implemented
ports).
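
For reference, a condensed sketch of the fallback policy described above
(variable declarations omitted; the complete version is
ahci_init_interrupts() in the patch below):

  rc = pci_enable_msi_block_auto(pdev, &maxvec);
  if (rc > 0 && rc != maxvec && rc != 1) {
          /* fewer MSIs than requested: negate multi-MSI, use a single MSI */
          pci_disable_msi(pdev);
          if (pci_enable_msi(pdev))
                  pci_intx(pdev, 1);      /* last resort: legacy INTx */
  }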

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/ata/ahci.c | 93 +++++++++++++++++++++++++++++++++++++--
drivers/ata/ahci.h | 6 +++
drivers/ata/libahci.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++---
3 files changed, 207 insertions(+), 10 deletions(-)

diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7862d17..29ed8a8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1057,6 +1057,86 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif

+int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+ int rc;
+ unsigned int maxvec;
+
+ if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
+ rc = pci_enable_msi_block_auto(pdev, &maxvec);
+ if (rc > 0) {
+ if ((rc == maxvec) || (rc == 1))
+ return rc;
+ /*
+ * Assume that the advantage of multiple MSIs is negated,
+ * so fall back to single MSI mode to save resources
+ */
+ pci_disable_msi(pdev);
+ if (!pci_enable_msi(pdev))
+ return 1;
+ }
+ }
+
+ pci_intx(pdev, 1);
+ return 0;
+}
+
+/**
+ * ahci_host_activate - start AHCI host, request IRQs and register it
+ * @host: target ATA host
+ * @irq: base IRQ number to request
+ * @n_msis: number of MSIs allocated for this host
+ * @irq_handler: irq_handler used when requesting IRQs
+ * @irq_flags: irq_flags used when requesting IRQs
+ *
+ * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
+ * when multiple MSIs were allocated. That is one MSI per port, starting
+ * from @irq.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
+{
+ int i, rc;
+
+ /* Sharing Last Message among several ports is not supported */
+ if (n_msis < host->n_ports)
+ return -EINVAL;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+ rc = devm_request_threaded_irq(host->dev,
+ irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
+ dev_driver_string(host->dev), host->ports[i]);
+ if (rc)
+ goto out_free_irqs;
+ }
+
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_desc(host->ports[i], "irq %d", irq + i);
+
+ rc = ata_host_register(host, &ahci_sht);
+ if (rc)
+ goto out_free_all_irqs;
+
+ return 0;
+
+out_free_all_irqs:
+ i = host->n_ports;
+out_free_irqs:
+ for (i--; i >= 0; i--)
+ devm_free_irq(host->dev, irq + i, host->ports[i]);
+
+ return rc;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1065,7 +1145,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
- int n_ports, i, rc;
+ int n_ports, n_msis, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;

VPRINTK("ENTER\n");
@@ -1150,11 +1230,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;

- if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
- pci_intx(pdev, 1);
-
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];

+ n_msis = ahci_init_interrupts(pdev, hpriv);
+ if (n_msis > 1)
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);

@@ -1250,6 +1331,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_print_info(host);

pci_set_master(pdev);
+
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
+ return ahci_host_activate(host, pdev->irq, n_msis);
+
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 9be4712..b830e6c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -231,6 +231,7 @@ enum {
AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
port start (wait until
error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */

/* ap->flags bits */

@@ -297,6 +298,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
+ u32 intr_status; /* interrupts to handle */
+ spinlock_t lock; /* protects parent ata_port */
u32 intr_mask; /* interrupts to enable */
bool fbs_supported; /* set iff FBS is supported */
bool fbs_enabled; /* set iff FBS is enabled */
@@ -359,7 +362,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);

static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4201e53..887ecd6 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1655,19 +1655,16 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}

-static void ahci_port_intr(struct ata_port *ap)
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+ void __iomem *port_mmio, u32 status)
{
- void __iomem *port_mmio = ahci_port_base(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
+ u32 qc_active = 0;
int rc;

- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
/* ignore BAD_PMP while resetting */
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
@@ -1743,6 +1740,107 @@ static void ahci_port_intr(struct ata_port *ap)
}
}

+void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+}
+
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
+{
+ struct ata_port *ap = dev_instance;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ status = pp->intr_status;
+ if (status)
+ pp->intr_status = 0;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ spin_lock_bh(ap->lock);
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+ spin_unlock_bh(ap->lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(ahci_thread_fn);
+
+void ahci_hw_port_interrupt(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ pp->intr_status |= status;
+}
+
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
+{
+ struct ata_port *ap_this = dev_instance;
+ struct ahci_port_priv *pp = ap_this->private_data;
+ struct ata_host *host = ap_this->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int i;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ spin_lock(&host->lock);
+
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+
+ if (!irq_stat) {
+ u32 status = pp->intr_status;
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return status ? IRQ_WAKE_THREAD : IRQ_NONE;
+ }
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_hw_port_interrupt(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_warn(host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+ }
+
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL_GPL(ahci_hw_interrupt);
+
irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
@@ -2196,6 +2294,14 @@ static int ahci_port_start(struct ata_port *ap)
*/
pp->intr_mask = DEF_PORT_IRQ;

+ /*
+ * Switch to per-port locking in case each port has its own MSI vector.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+ spin_lock_init(&pp->lock);
+ ap->lock = &pp->lock;
+ }
+
ap->private_data = pp;

/* engage engines, captain */
--
1.7.7.6


--
Regards,
Alexander Gordeev
[email protected]

2012-11-19 15:03:41

by Alexander Gordeev

Subject: [PATCH v4 -tip 2/3] PCI, MSI: Enable multiple MSIs with pci_enable_msi_block_auto()

The new function pci_enable_msi_block_auto() tries to allocate the maximum
possible number of MSIs, up to the number the device supports. It
generalizes the pattern in which pci_enable_msi_block() is called repeatedly
until it either succeeds or fails.

Unlike pci_enable_msi_block(), which takes the number of MSIs to allocate
as an input parameter, pci_enable_msi_block_auto() can be used by device
drivers to obtain the number of assigned MSIs and the number of MSIs the
device supports.
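
For illustration only, a sketch of the retry pattern being generalized
versus the new call; pdev and nvec are placeholders:

  /* Old pattern: retry with the count the device reports it can support */
  int rc = pci_enable_msi_block(pdev, nvec);
  while (rc > 0)
          rc = pci_enable_msi_block(pdev, rc);

  /* New helper: a single call enables MSI and returns the allocated count */
  unsigned int maxvec;
  int allocated = pci_enable_msi_block_auto(pdev, &maxvec);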

Signed-off-by: Alexander Gordeev <[email protected]>
Acked-by: Bjorn Helgaas <[email protected]>
---
Documentation/PCI/MSI-HOWTO.txt | 37 ++++++++++++++++++++++++++++++++-----
drivers/pci/msi.c | 26 ++++++++++++++++++++++++++
include/linux/pci.h | 7 +++++++
3 files changed, 65 insertions(+), 5 deletions(-)

diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 53e6fca..a091780 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -127,15 +127,42 @@ on the number of vectors that can be allocated; pci_enable_msi_block()
returns as soon as it finds any constraint that doesn't allow the
call to succeed.

-4.2.3 pci_disable_msi
+4.2.3 pci_enable_msi_block_auto
+
+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *count)
+
+This variation on pci_enable_msi() call allows a device driver to request
+the maximum possible number of MSIs. The MSI specification only allows
+interrupts to be allocated in powers of two, up to a maximum of 2^5 (32).
+
+If this function returns a positive number, it indicates that it has
+succeeded and the returned value is the number of allocated interrupts. In
+this case, the function enables MSI on this device and updates dev->irq to
+be the lowest of the new interrupts assigned to it. The other interrupts
+assigned to the device are in the range dev->irq to dev->irq + returned
+value - 1.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to request any more MSI interrupts for
+this device.
+
+If the device driver needs to know the number of interrupts the device
+supports it can pass the pointer count where that number is stored. The
+device driver must decide what action to take if pci_enable_msi_block_auto()
+succeeds, but returns a value less than the number of interrupts supported.
+If the device driver does not need to know the number of interrupts
+supported, it can set the pointer count to NULL.
+
+4.2.4 pci_disable_msi

void pci_disable_msi(struct pci_dev *dev)

This function should be used to undo the effect of pci_enable_msi() or
-pci_enable_msi_block(). Calling it restores dev->irq to the pin-based
-interrupt number and frees the previously allocated message signaled
-interrupt(s). The interrupt may subsequently be assigned to another
-device, so drivers should not cache the value of dev->irq.
+pci_enable_msi_block() or pci_enable_msi_block_auto(). Calling it restores
+dev->irq to the pin-based interrupt number and frees the previously
+allocated message signaled interrupt(s). The interrupt may subsequently be
+assigned to another device, so drivers should not cache the value of
+dev->irq.

Before calling this function, a device driver must always call free_irq()
on any interrupt for which it previously called request_irq().
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a825d78..6ec44e2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -839,6 +839,32 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
}
EXPORT_SYMBOL(pci_enable_msi_block);

+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
+{
+ int ret, pos, nvec;
+ u16 msgctl;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ if (maxvec)
+ *maxvec = ret;
+
+ do {
+ nvec = ret;
+ ret = pci_enable_msi_block(dev, nvec);
+ } while (ret > 0);
+
+ if (ret < 0)
+ return ret;
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_block_auto);
+
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index ee21795..fbba374 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1080,6 +1080,12 @@ static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
return -1;
}

+static inline int
+pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
+{
+ return -1;
+}
+
static inline void pci_msi_shutdown(struct pci_dev *dev)
{ }
static inline void pci_disable_msi(struct pci_dev *dev)
@@ -1111,6 +1117,7 @@ static inline int pci_msi_enabled(void)
}
#else
extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
+extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
extern void pci_msi_shutdown(struct pci_dev *dev);
extern void pci_disable_msi(struct pci_dev *dev);
extern int pci_msix_table_size(struct pci_dev *dev);
--
1.7.7.6


--
Regards,
Alexander Gordeev
[email protected]