Patch set to abstract portions of the MSI core so that it can be used on
architectures which don't use standard interrupt controllers.
1/4 msi-arch-init.patch
Add an msi_arch_init() hook which can be used to perform platform-specific
setup prior to MSI use.
2/4 msi-callouts.patch
Define a set of callouts to implement the platform-specific tasks:
msi_setup - set up plumbing to get a vector directed at a default
cpu, and return the corresponding MSI bus address and data.
msi_teardown - inverse of msi_setup
msi_target - retarget a vector to a given cpu
Define the routine msi_register_callouts() which can be called from
a platform's msi_arch_init() code to override the generic callouts
(see the sketch after this list).
3/4 ia64-per-platform-device-vector.patch
For the ia64 arch, allow per-platform definitions of
IA64_FIRST_DEVICE_VECTOR and IA64_LAST_DEVICE_VECTOR.
4/4 msi-altix.patch
Altix-specific callouts to implement MSI. This involves a fair amount of
code reorganization in the sn irq.c code, as well as some extensions to the
Altix PCI provider abstraction.
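For reference, a rough sketch of how a platform is expected to use the
callout interface from 2/4 (struct msi_callouts and msi_register_callouts()
are the ones added in that patch; the example_* names here are made up):

static struct msi_callouts example_callouts = {
	.msi_setup	= example_msi_setup,
	.msi_teardown	= example_msi_teardown,
#ifdef CONFIG_SMP
	.msi_target	= example_msi_target,
#endif
};

int example_msi_arch_init(void)
{
	return msi_register_callouts(&example_callouts);
}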
Signed-off-by: Mark Maule <[email protected]>
Index: msi/arch/ia64/sn/pci/msi.c
===================================================================
--- msi.orig/arch/ia64/sn/pci/msi.c 2005-12-20 09:40:22.174273268 -0600
+++ msi/arch/ia64/sn/pci/msi.c 2005-12-20 11:24:01.969082807 -0600
@@ -6,13 +6,205 @@
* Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
*/
-#include <asm/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/cpumask.h>
+
+#include <asm/msi.h>
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/nodepda.h>
+
+struct sn_msi_info {
+ uint64_t pci_addr;
+ struct sn_irq_info *sn_irq_info;
+};
+
+static struct sn_msi_info *sn_msi_info;
+
+static void
+sn_msi_teardown(unsigned int vector)
+{
+ nasid_t nasid;
+ int widget;
+ struct pci_dev *pdev;
+ struct pcidev_info *sn_pdev;
+ struct sn_irq_info *sn_irq_info;
+ struct pcibus_bussoft *bussoft;
+ struct sn_pcibus_provider *provider;
+
+ sn_irq_info = sn_msi_info[vector].sn_irq_info;
+ if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+ return;
+
+ sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ pdev = sn_pdev->pdi_linux_pcidev;
+ provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+ (*provider->dma_unmap)(pdev,
+ sn_msi_info[vector].pci_addr,
+ PCI_DMA_FROMDEVICE);
+ sn_msi_info[vector].pci_addr = 0;
+
+ bussoft = SN_PCIDEV_BUSSOFT(pdev);
+ nasid = NASID_GET(bussoft->bs_base);
+ widget = (nasid & 1) ?
+ TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+ SWIN_WIDGETNUM(bussoft->bs_base);
+
+ sn_intr_free(nasid, widget, sn_irq_info);
+ sn_msi_info[vector].sn_irq_info = NULL;
+
+ return;
+}
int
-sn_msi_init(void)
+sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
+ uint32_t *addr_hi, uint32_t *addr_lo, uint32_t *data)
{
+ int widget;
+ int status;
+ nasid_t nasid;
+ uint64_t bus_addr;
+ struct sn_irq_info *sn_irq_info;
+ struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+ if (bussoft == NULL)
+ return -EINVAL;
+
+ if (provider == NULL || provider->dma_map_consistent == NULL)
+ return -EINVAL;
+
+ /*
+ * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
+ * decide which cpu to direct this msi at by default.
+ */
+
+ nasid = NASID_GET(bussoft->bs_base);
+ widget = (nasid & 1) ?
+ TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+ SWIN_WIDGETNUM(bussoft->bs_base);
+
+ sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (! sn_irq_info)
+ return -ENOMEM;
+
+ status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
+ if (status) {
+ kfree(sn_irq_info);
+ return -ENOMEM;
+ }
+
+ sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */
+ sn_irq_fixup(pdev, sn_irq_info);
+
+ /* Prom probably should fill these in, but doesn't ... */
+ sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
+ sn_irq_info->irq_bridge = (void *)bussoft->bs_base;
+
/*
- * return error until MSI is supported on altix platforms
+ * Map the xio address into bus space
*/
- return -EINVAL;
+ bus_addr = (*provider->dma_map_consistent)(pdev,
+ sn_irq_info->irq_xtalkaddr,
+ sizeof(sn_irq_info->irq_xtalkaddr),
+ SN_DMA_MSI|SN_DMA_ADDR_XIO);
+ if (! bus_addr) {
+ sn_intr_free(nasid, widget, sn_irq_info);
+ kfree(sn_irq_info);
+ return -ENOMEM;
+ }
+
+ sn_msi_info[vector].sn_irq_info = sn_irq_info;
+ sn_msi_info[vector].pci_addr = bus_addr;
+
+ *addr_hi = (uint32_t)(bus_addr >> 32);
+ *addr_lo = (uint32_t)(bus_addr & 0x00000000ffffffff);
+
+ /*
+ * In the SN platform, bit 16 is a "send vector" bit which
+ * must be present in order to move the vector through the system.
+ */
+ *data = 0x100 + (unsigned int)vector;
+
+#ifdef CONFIG_SMP
+ set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
+#endif
+
+ return 0;
+}
+
+static void
+sn_msi_target(unsigned int vector, unsigned int cpu,
+ uint32_t *addr_hi, uint32_t *addr_lo)
+{
+ int slice;
+ nasid_t nasid;
+ uint64_t bus_addr;
+ struct pci_dev *pdev;
+ struct pcidev_info *sn_pdev;
+ struct sn_irq_info *sn_irq_info;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *provider;
+
+ sn_irq_info = sn_msi_info[vector].sn_irq_info;
+ if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+ return;
+
+ /*
+ * Release XIO resources for the old MSI PCI address
+ */
+
+ sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ pdev = sn_pdev->pdi_linux_pcidev;
+ provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+ bus_addr = (uint64_t)(*addr_hi) << 32 | (uint64_t)(*addr_lo);
+ (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
+ sn_msi_info[vector].pci_addr = 0;
+
+ nasid = cpuid_to_nasid(cpu);
+ slice = cpuid_to_slice(cpu);
+
+ new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
+ sn_msi_info[vector].sn_irq_info = new_irq_info;
+ if (new_irq_info == NULL)
+ return;
+
+ /*
+ * Map the xio address into bus space
+ */
+
+ bus_addr = (*provider->dma_map_consistent)(pdev,
+ new_irq_info->irq_xtalkaddr,
+ sizeof(new_irq_info->irq_xtalkaddr),
+ SN_DMA_MSI|SN_DMA_ADDR_XIO);
+
+ sn_msi_info[vector].pci_addr = bus_addr;
+ *addr_hi = (uint32_t)(bus_addr >> 32);
+ *addr_lo = (uint32_t)(bus_addr & 0x00000000ffffffff);
+}
+
+struct msi_callouts sn_msi_callouts = {
+ .msi_setup = sn_msi_setup,
+ .msi_teardown = sn_msi_teardown,
+#ifdef CONFIG_SMP
+ .msi_target = sn_msi_target,
+#endif
+};
+
+int
+sn_msi_init(void)
+{
+ sn_msi_info =
+ kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL);
+ if (! sn_msi_info)
+ return -ENOMEM;
+
+ msi_register_callouts(&sn_msi_callouts);
+ return 0;
}
Index: msi/arch/ia64/sn/kernel/io_init.c
===================================================================
--- msi.orig/arch/ia64/sn/kernel/io_init.c 2005-12-20 09:40:22.176226204 -0600
+++ msi/arch/ia64/sn/kernel/io_init.c 2005-12-20 11:24:01.972012209 -0600
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/bootmem.h>
@@ -51,7 +51,7 @@
*/
static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{
return 0;
}
Index: msi/arch/ia64/sn/kernel/irq.c
===================================================================
--- msi.orig/arch/ia64/sn/kernel/irq.c 2005-12-20 09:40:22.176226204 -0600
+++ msi/arch/ia64/sn/kernel/irq.c 2005-12-20 11:24:01.973965144 -0600
@@ -25,11 +25,11 @@
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
+struct list_head **sn_irq_lh;
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
-static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
- u64 sn_irq_info,
+uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
@@ -39,12 +39,13 @@
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
+
return ret_stuff.status;
}
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
@@ -113,73 +114,91 @@
static void sn_irq_info_free(struct rcu_head *head);
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- int cpuid, cpuphys;
+ int vector;
+ int cpuphys;
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
- cpuid = first_cpu(mask);
- cpuphys = cpu_physical_id(cpuid);
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list) {
- uint64_t bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
- struct sn_pcibus_provider *pci_provider;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- break;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (uint64_t) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- break; /* irq is not a device interrupt */
- }
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
- local_nasid = NASID_GET(bridge);
+ bridge = (uint64_t) new_irq_info->irq_bridge;
+ if (!bridge) {
+ kfree(new_irq_info);
+ return NULL; /* irq is not a device interrupt */
+ }
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
-
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
-
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- __pa(new_irq_info), irq,
- cpuid_to_nasid(cpuid),
- cpuid_to_slice(cpuid));
-
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- break;
- }
+ local_nasid = NASID_GET(bridge);
- new_irq_info->irq_cpuid = cpuid;
- register_intr_pda(new_irq_info);
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
- pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->target_interrupt)
- (pci_provider->target_interrupt)(new_irq_info);
-
- spin_lock(&sn_irq_info_lock);
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
- spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ vector = sn_irq_info->irq_irq;
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ /* Update kernels new_irq_info with new target info */
+ unregister_intr_pda(new_irq_info);
+
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
+
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
+
+ cpuphys = nasid_slice_to_cpuid(nasid, slice);
+ new_irq_info->irq_cpuid = cpuphys;
+ register_intr_pda(new_irq_info);
+
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+
+ /*
+ * If this represents a line interrupt, target it. If it's
+ * an msi (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
+
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif
- }
+
+ return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(first_cpu(mask));
+ slice = cpuid_to_slice(first_cpu(mask));
+
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
struct hw_interrupt_type irq_type_sn = {
@@ -441,5 +460,4 @@
INIT_LIST_HEAD(sn_irq_lh[i]);
}
-
}
Index: msi/arch/ia64/sn/pci/pci_dma.c
===================================================================
--- msi.orig/arch/ia64/sn/pci/pci_dma.c 2005-12-20 09:40:22.174273268 -0600
+++ msi/arch/ia64/sn/pci/pci_dma.c 2005-12-20 11:24:01.975918079 -0600
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@
* resources.
*/
- *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@
BUG_ON(dev->bus != &pci_bus_type);
phys_addr = __pa(cpu_addr);
- dma_addr = provider->dma_map(pdev, phys_addr, size);
+ dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
return 0;
@@ -260,7 +261,8 @@
for (i = 0; i < nhwentries; i++, sg++) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
sg->dma_address = provider->dma_map(pdev,
- phys_addr, sg->length);
+ phys_addr, sg->length,
+ SN_DMA_ADDR_PHYS);
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
Index: msi/arch/ia64/sn/pci/pcibr/pcibr_dma.c
===================================================================
--- msi.orig/arch/ia64/sn/pci/pcibr/pcibr_dma.c 2005-12-20 09:40:22.175249736 -0600
+++ msi/arch/ia64/sn/pci/pcibr/pcibr_dma.c 2005-12-20 11:24:01.980800417 -0600
@@ -41,7 +41,7 @@
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ uint64_t paddr, size_t req_size, uint64_t flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
- xio_addr =
- IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
+
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
@@ -91,6 +94,13 @@
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
+
+ /*
+ * If we're mapping for MSI, set the MSI bit in the ATE
+ */
+ if (dma_flags & SN_DMA_MSI)
+ ate |= PCI32_ATE_MSI;
+
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
@@ -105,20 +115,27 @@
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
+
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
- uint64_t dma_attributes)
+ uint64_t dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
uint64_t pci_addr;
/* Translate to Crosstalk View of Physical Address */
- pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr)) | dma_attributes;
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ pci_addr = IS_PIC_SOFT(pcibus_info) ?
+ PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ pci_addr = paddr;
+
+ pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@
((uint64_t) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
- pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+ pci_addr |= (dma_flags & SN_DMA_MSI) ?
+ TIOCP_PCI64_CMDTYPE_MSI :
+ TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -142,7 +161,7 @@
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ uint64_t paddr, size_t req_size, uint64_t flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -158,8 +177,14 @@
return 0;
}
- xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
@@ -331,7 +356,7 @@
*/
dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -348,11 +373,11 @@
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_PREF);
+ PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
- size, 0);
+ size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
@@ -360,7 +385,8 @@
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
- size, PCI32_ATE_PREF);
+ size, PCI32_ATE_PREF,
+ dma_flags);
}
}
@@ -369,18 +395,18 @@
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
- size_t size)
+ size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_BAR);
+ PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
- PCI32_ATE_BAR);
+ PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
Index: msi/arch/ia64/sn/pci/tioca_provider.c
===================================================================
--- msi.orig/arch/ia64/sn/pci/tioca_provider.c 2005-12-20 09:40:22.175249736 -0600
+++ msi/arch/ia64/sn/pci/tioca_provider.c 2005-12-20 11:24:01.982753352 -0600
@@ -515,11 +515,17 @@
* use the GART mapped mode.
*/
static uint64_t
-tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, int dma_flags)
{
uint64_t mapaddr;
/*
+ * Not supported for now ...
+ */
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ /*
* If card is 64 or 48 bit addresable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
Index: msi/arch/ia64/sn/pci/tioce_provider.c
===================================================================
--- msi.orig/arch/ia64/sn/pci/tioce_provider.c 2005-12-20 09:40:22.175249736 -0600
+++ msi/arch/ia64/sn/pci/tioce_provider.c 2005-12-20 11:24:01.984706287 -0600
@@ -52,7 +52,8 @@
(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
#define ATE_VALID(ate) ((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+ (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
/*
* Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -78,15 +79,17 @@
*
* 63 - must be 1 to indicate d64 mode to CE hardware
* 62 - barrier bit ... controlled with tioce_dma_barrier()
- * 61 - 0 since this is not an MSI transaction
+ * 61 - msi bit ... specified through dma_flags
* 60:54 - reserved, MBZ
*/
static uint64_t
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{
uint64_t bus_addr;
bus_addr = ct_addr | (1UL << 63);
+ if (dma_flags & SN_DMA_MSI)
+ bus_addr |= (1UL << 61);
return bus_addr;
}
@@ -143,7 +146,7 @@
*/
static uint64_t
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
- uint64_t ct_addr, int len)
+ uint64_t ct_addr, int len, int dma_flags)
{
int i;
int j;
@@ -152,6 +155,7 @@
int entries;
int nates;
int pagesize;
+ int msi_capable, msi_wanted;
uint64_t *ate_shadow;
uint64_t *ate_reg;
uint64_t addr;
@@ -173,6 +177,7 @@
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = ce_kern->ce_ate3240_pagesize;
bus_base = TIOCE_M32_MIN;
+ msi_capable = 1;
break;
case TIOCE_ATE_M40:
first = 0;
@@ -181,6 +186,7 @@
ate_reg = ce_mmr->ce_ure_ate40;
pagesize = MB(64);
bus_base = TIOCE_M40_MIN;
+ msi_capable = 0;
break;
case TIOCE_ATE_M40S:
/*
@@ -193,11 +199,16 @@
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = GB(16);
bus_base = TIOCE_M40S_MIN;
+ msi_capable = 0;
break;
default:
return 0;
}
+ msi_wanted = dma_flags & SN_DMA_MSI;
+ if (msi_wanted && !msi_capable)
+ return 0;
+
nates = ATE_NPAGES(ct_addr, len, pagesize);
if (nates > entries)
return 0;
@@ -226,7 +237,7 @@
for (j = 0; j < nates; j++) {
uint64_t ate;
- ate = ATE_MAKE(addr, pagesize);
+ ate = ATE_MAKE(addr, pagesize, msi_wanted);
ate_shadow[i + j] = ate;
writeq(ate, &ate_reg[i + j]);
addr += pagesize;
@@ -253,7 +264,7 @@
* Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
*/
static uint64_t
-tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr, int dma_flags)
{
int dma_ok;
int port;
@@ -263,6 +274,9 @@
uint64_t ct_lower;
dma_addr_t bus_addr;
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
ct_upper = ct_addr & ~0x3fffffffUL;
ct_lower = ct_addr & 0x3fffffffUL;
@@ -387,7 +401,7 @@
*/
static uint64_t
tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count,
- int barrier)
+ int barrier, int dma_flags)
{
unsigned long flags;
uint64_t ct_addr;
@@ -403,15 +417,18 @@
if (dma_mask < 0x7fffffffUL)
return 0;
- ct_addr = PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ ct_addr = PHYS_TO_TIODMA(paddr);
+ else
+ ct_addr = paddr;
/*
* If the device can generate 64 bit addresses, create a D64 map.
- * Since this should never fail, bypass the rest of the checks.
*/
if (dma_mask == ~0UL) {
- mapaddr = tioce_dma_d64(ct_addr);
- goto dma_map_done;
+ mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+ if (mapaddr)
+ goto dma_map_done;
}
pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -454,18 +471,22 @@
if (byte_count > MB(64)) {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
} else {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
}
}
@@ -473,7 +494,7 @@
* 32-bit direct is the next mode to try
*/
if (!mapaddr && dma_mask >= 0xffffffffUL)
- mapaddr = tioce_dma_d32(pdev, ct_addr);
+ mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
/*
* Last resort, try 32-bit ATE-based map.
@@ -481,12 +502,12 @@
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
- byte_count);
+ byte_count, dma_flags);
spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
dma_map_done:
- if (mapaddr & barrier)
+ if (mapaddr && barrier)
mapaddr = tioce_dma_barrier(mapaddr, 1);
return mapaddr;
@@ -502,9 +523,9 @@
* in the address.
*/
static uint64_t
-tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}
/**
@@ -516,9 +537,9 @@
* Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address.
 */
static uint64_t
-tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}
/**
Index: msi/include/asm-ia64/sn/intr.h
===================================================================
--- msi.orig/include/asm-ia64/sn/intr.h 2005-12-20 09:40:22.176226204 -0600
+++ msi/include/asm-ia64/sn/intr.h 2005-12-20 11:24:01.991541559 -0600
@@ -3,13 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_INTR_H
#define _ASM_IA64_SN_INTR_H
#include <linux/rcupdate.h>
+#include <asm/sn/types.h>
#define SGI_UART_VECTOR (0xe9)
@@ -40,6 +41,7 @@
int irq_cpuid; /* kernel logical cpuid */
int irq_irq; /* the IRQ number */
int irq_int_bit; /* Bridge interrupt pin */
+ /* <0 means MSI */
uint64_t irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
int irq_bridge_type;/* pciio asic type (pciio.h) */
void *irq_bridge; /* bridge generating irq */
@@ -53,6 +55,12 @@
};
extern void sn_send_IPI_phys(int, long, int, int);
+extern uint64_t sn_intr_alloc(nasid_t, int,
+ struct sn_irq_info *,
+ int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern struct list_head **sn_irq_lh;
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
Index: msi/include/asm-ia64/sn/pcibr_provider.h
===================================================================
--- msi.orig/include/asm-ia64/sn/pcibr_provider.h 2005-12-20 09:40:22.177202671 -0600
+++ msi/include/asm-ia64/sn/pcibr_provider.h 2005-12-20 11:24:01.993494494 -0600
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
@@ -55,6 +55,7 @@
#define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1)
#define PCI32_ATE_PREC (0x1 << 2)
+#define PCI32_ATE_MSI (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12
@@ -129,8 +130,8 @@
extern int pcibr_init_provider(void);
extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
-extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
-extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
/*
Index: msi/include/asm-ia64/sn/pcibus_provider_defs.h
===================================================================
--- msi.orig/include/asm-ia64/sn/pcibus_provider_defs.h 2005-12-20 09:40:22.177202671 -0600
+++ msi/include/asm-ia64/sn/pcibus_provider_defs.h 2005-12-20 11:24:01.996423896 -0600
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
@@ -45,13 +45,24 @@
*/
struct sn_pcibus_provider {
- dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t);
- dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
+ dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+ dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
void (*force_interrupt)(struct sn_irq_info *);
void (*target_interrupt)(struct sn_irq_info *);
};
+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit 4 specifies that address is to be used for MSI
+ */
+
+#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
+#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
+#define SN_DMA_ADDR_XIO 2 /* address is an xio address */
+#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
+
extern struct sn_pcibus_provider *sn_pci_provider[];
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
Index: msi/include/asm-ia64/sn/tiocp.h
===================================================================
--- msi.orig/include/asm-ia64/sn/tiocp.h 2005-12-20 09:40:22.177202671 -0600
+++ msi/include/asm-ia64/sn/tiocp.h 2005-12-20 11:24:01.998376831 -0600
@@ -3,13 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
+#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
/*****************************************************************************
Add an architecture-specific MSI setup hook, msi_arch_init(), called from
msi_init(). For IA64, implement this as a machvec entry so that
platform-specific code can be called.
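The resulting call chain, condensed from the hunks below for illustration
(only sn2 ends up with a non-trivial platform_msi_init; the other headers
keep a no-op inline):

/* drivers/pci/msi.c: msi_init() now calls msi_arch_init() and disables
 * MSI if it fails */

/* include/asm-ia64/msi.h */
static inline int msi_arch_init(void) { return platform_msi_init(); }

/* include/asm-ia64/machvec_sn2.h + arch/ia64/sn/pci/msi.c */
#define platform_msi_init	sn_msi_init	/* returns -EINVAL for now */

/* include/asm-i386/msi.h, asm-x86_64/msi.h, asm-sparc/msi.h */
static inline int msi_arch_init(void) { return 0; }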
Signed-off-by: Mark Maule <[email protected]>
Index: msi/drivers/pci/msi.c
===================================================================
--- msi.orig/drivers/pci/msi.c 2005-12-13 12:22:42.784269607 -0600
+++ msi/drivers/pci/msi.c 2005-12-19 15:34:28.427921393 -0600
@@ -367,6 +367,13 @@
return status;
}
+ if ((status = msi_arch_init()) < 0) {
+ pci_msi_enable = 0;
+ printk(KERN_WARNING
+ "PCI: MSI arch init failed. MSI disabled.\n");
+ return status;
+ }
+
if ((status = msi_cache_init()) < 0) {
pci_msi_enable = 0;
printk(KERN_WARNING "PCI: MSI cache init failed\n");
Index: msi/include/asm-i386/msi.h
===================================================================
--- msi.orig/include/asm-i386/msi.h 2005-12-13 12:22:42.785246074 -0600
+++ msi/include/asm-i386/msi.h 2005-12-13 16:09:49.152553259 -0600
@@ -12,4 +12,6 @@
#define LAST_DEVICE_VECTOR 232
#define MSI_TARGET_CPU_SHIFT 12
+static inline int msi_arch_init(void) { return 0; }
+
#endif /* ASM_MSI_H */
Index: msi/include/asm-sparc/msi.h
===================================================================
--- msi.orig/include/asm-sparc/msi.h 2005-12-13 12:22:42.785246074 -0600
+++ msi/include/asm-sparc/msi.h 2005-12-13 16:09:49.194541334 -0600
@@ -28,4 +28,6 @@
"i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
}
+static inline int msi_arch_init(void) { return 0; }
+
#endif /* !(_SPARC_MSI_H) */
Index: msi/include/asm-x86_64/msi.h
===================================================================
--- msi.orig/include/asm-x86_64/msi.h 2005-12-13 12:22:42.786222541 -0600
+++ msi/include/asm-x86_64/msi.h 2005-12-13 16:09:49.227741207 -0600
@@ -13,4 +13,6 @@
#define LAST_DEVICE_VECTOR 232
#define MSI_TARGET_CPU_SHIFT 12
+static inline int msi_arch_init(void) { return 0; }
+
#endif /* ASM_MSI_H */
Index: msi/include/asm-ia64/machvec.h
===================================================================
--- msi.orig/include/asm-ia64/machvec.h 2005-12-13 12:22:42.786222541 -0600
+++ msi/include/asm-ia64/machvec.h 2005-12-13 16:09:49.247270544 -0600
@@ -74,6 +74,7 @@
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+typedef int ia64_mv_msi_init_t (void);
static inline void
machvec_noop (void)
@@ -85,6 +86,12 @@
{
}
+static inline int
+machvec_noop_retzero (void)
+{
+ return 0;
+}
+
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -146,6 +153,7 @@
# define platform_readw_relaxed ia64_mv.readw_relaxed
# define platform_readl_relaxed ia64_mv.readl_relaxed
# define platform_readq_relaxed ia64_mv.readq_relaxed
+# define platform_msi_init ia64_mv.msi_init
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
@@ -194,6 +202,7 @@
ia64_mv_readw_relaxed_t *readw_relaxed;
ia64_mv_readl_relaxed_t *readl_relaxed;
ia64_mv_readq_relaxed_t *readq_relaxed;
+ ia64_mv_msi_init_t *msi_init;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
@@ -238,6 +247,7 @@
platform_readw_relaxed, \
platform_readl_relaxed, \
platform_readq_relaxed, \
+ platform_msi_init, \
}
extern struct ia64_machine_vector ia64_mv;
@@ -386,5 +396,8 @@
#ifndef platform_readq_relaxed
# define platform_readq_relaxed __ia64_readq_relaxed
#endif
+#ifndef platform_msi_init
+# define platform_msi_init machvec_noop_retzero
+#endif
#endif /* _ASM_IA64_MACHVEC_H */
Index: msi/include/asm-ia64/machvec_sn2.h
===================================================================
--- msi.orig/include/asm-ia64/machvec_sn2.h 2005-12-13 12:22:42.787199008 -0600
+++ msi/include/asm-ia64/machvec_sn2.h 2005-12-13 16:09:49.257035213 -0600
@@ -71,6 +71,7 @@
extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_msi_init_t sn_msi_init;
/*
* This stuff has dual use!
@@ -120,6 +121,7 @@
#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
+#define platform_msi_init sn_msi_init
#include <asm/sn/io.h>
Index: msi/include/asm-ia64/msi.h
===================================================================
--- msi.orig/include/asm-ia64/msi.h 2005-12-13 12:22:42.787199008 -0600
+++ msi/include/asm-ia64/msi.h 2005-12-13 16:09:49.268752815 -0600
@@ -14,4 +14,6 @@
#define ack_APIC_irq ia64_eoi
#define MSI_TARGET_CPU_SHIFT 4
+static inline int msi_arch_init(void) { return platform_msi_init(); }
+
#endif /* ASM_MSI_H */
Index: msi/arch/ia64/sn/pci/Makefile
===================================================================
--- msi.orig/arch/ia64/sn/pci/Makefile 2005-12-13 12:22:42.788175474 -0600
+++ msi/arch/ia64/sn/pci/Makefile 2005-12-13 16:09:49.296093887 -0600
@@ -3,8 +3,9 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (C) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn pci general routines.
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
+obj-$(CONFIG_PCI_MSI) += msi.o
Index: msi/arch/ia64/sn/pci/msi.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ msi/arch/ia64/sn/pci/msi.c 2005-12-19 15:34:25.300296820 -0600
@@ -0,0 +1,18 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <asm/errno.h>
+
+int
+sn_msi_init(void)
+{
+ /*
+ * return error until MSI is supported on altix platforms
+ */
+ return -EINVAL;
+}
Abstract IA64_FIRST_DEVICE_VECTOR/IA64_LAST_DEVICE_VECTOR since SN platforms
use a subset of the IA64 range. Implement this by making the above macros
global variables which the platform can override in its setup code.
Also add a reserve_irq_vector() routine used by SN to mark a vector as
in-use when it wasn't allocated through assign_irq_vector().
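A condensed example of the intended usage, taken from the SN hunks below
(illustrative only):

/* platform irq init (arch/ia64/sn/kernel/irq.c) narrows the range */
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

/* vectors handed out by the PROM rather than assign_irq_vector() are
 * marked busy so the generic allocator won't hand them out again */
reserve_irq_vector(sn_irq_info->irq_irq);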
Signed-off-by: Mark Maule <[email protected]>
Index: msi/arch/ia64/kernel/irq_ia64.c
===================================================================
--- msi.orig/arch/ia64/kernel/irq_ia64.c 2005-12-19 18:22:03.288789302 -0600
+++ msi/arch/ia64/kernel/irq_ia64.c 2005-12-19 18:24:08.578523710 -0600
@@ -46,6 +46,10 @@
#define IRQ_DEBUG 0
+/* These can be overridden in platform_irq_init */
+int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
+int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
+
/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
@@ -60,7 +64,7 @@
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
+static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
int
assign_irq_vector (int irq)
@@ -89,6 +93,17 @@
printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}
+int
+reserve_irq_vector (int vector)
+{
+ if (vector < IA64_FIRST_DEVICE_VECTOR ||
+ vector > IA64_LAST_DEVICE_VECTOR)
+ return -EINVAL;
+
+ return test_and_set_bit(vector - IA64_FIRST_DEVICE_VECTOR,
+ ia64_vector_mask);
+}
+
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
#else
Index: msi/arch/ia64/sn/kernel/irq.c
===================================================================
--- msi.orig/arch/ia64/sn/kernel/irq.c 2005-12-19 18:22:03.289765771 -0600
+++ msi/arch/ia64/sn/kernel/irq.c 2005-12-20 09:40:22.176226204 -0600
@@ -203,6 +203,9 @@
int i;
irq_desc_t *base_desc = irq_desc;
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
for (i = 0; i < NR_IRQS; i++) {
if (base_desc[i].handler == &no_irq_type) {
base_desc[i].handler = &irq_type_sn;
@@ -287,6 +290,7 @@
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+ reserve_irq_vector(sn_irq_info->irq_irq);
spin_unlock(&sn_irq_info_lock);
(void)register_intr_pda(sn_irq_info);
@@ -310,8 +314,11 @@
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
pci_dev_put(pci_dev);
+
}
static inline void
Index: msi/include/asm-ia64/hw_irq.h
===================================================================
--- msi.orig/include/asm-ia64/hw_irq.h 2005-12-19 18:22:03.289765771 -0600
+++ msi/include/asm-ia64/hw_irq.h 2005-12-19 18:24:08.596100146 -0600
@@ -47,9 +47,19 @@
#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
/*
* Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ * Use vectors 0x30-0xe7 as the default device vector range for ia64.
+ * Platforms may choose to reduce this range in platform_irq_setup, but the
+ * platform range must fall within
+ * [IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
*/
-#define IA64_FIRST_DEVICE_VECTOR 0x30
-#define IA64_LAST_DEVICE_VECTOR 0xe7
+extern int ia64_first_device_vector;
+extern int ia64_last_device_vector;
+
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
+#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
+#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
+#define IA64_MAX_DEVICE_VECTORS (IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
@@ -83,6 +93,7 @@
extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void free_irq_vector (int vector);
+extern int reserve_irq_vector (int vector);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
Index: msi/drivers/pci/msi.c
===================================================================
--- msi.orig/drivers/pci/msi.c 2005-12-19 18:22:03.290742240 -0600
+++ msi/drivers/pci/msi.c 2005-12-20 09:59:14.669765449 -0600
@@ -37,7 +37,7 @@
#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
+u8 irq_vector[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS - 1 ] = 0 };
#endif
static struct msi_callouts msi_callouts;
@@ -421,6 +421,11 @@
printk(KERN_WARNING "PCI: MSI cache init failed\n");
return status;
}
+
+#ifndef CONFIG_X86_IO_APIC
+ irq_vector[0] = FIRST_DEVICE_VECTOR;
+#endif
+
last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
if (last_alloc_vector < 0) {
pci_msi_enable = 0;
Abstract portions of the MSI core for platforms that do not use standard
APIC interrupt controllers. This is implemented through a set of callouts
which default to current behavior, but which can be overridden by calling
msi_register_callouts() in the platform msi init code.
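Condensed, the core's flow after this patch looks roughly like this
(sketch only; see the hunks below for the real code):

/* at setup time */
status = msi_callouts.msi_setup(dev, vector, &address.hi_address,
				&address.lo_address.value, &data.value);
/* ... program the returned address/data into the MSI capability or the
 * MSI-X table entry ... */

/* on set_affinity (CONFIG_SMP) */
msi_callouts.msi_target(vector, dest_cpu, &address_hi, &address_lo);

/* when the vector is torn down */
msi_callouts.msi_teardown(vector);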
Signed-off-by: Mark Maule <[email protected]>
Index: msi/drivers/pci/msi.c
===================================================================
--- msi.orig/drivers/pci/msi.c 2005-12-19 15:34:28.427921393 -0600
+++ msi/drivers/pci/msi.c 2005-12-19 18:22:03.290742240 -0600
@@ -40,6 +40,8 @@
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif
+static struct msi_callouts msi_callouts;
+
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
@@ -89,10 +91,25 @@
}
#ifdef CONFIG_SMP
+static void msi_target_generic(unsigned int vector,
+ unsigned int dest_cpu,
+ uint32_t *address_hi, /* in/out */
+ uint32_t *address_lo) /* in/out */
+{
+ struct msg_address address;
+
+ address.lo_address.value = *address_lo;
+ address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
+ address.lo_address.value |=
+ (cpu_physical_id(dest_cpu) << MSI_TARGET_CPU_SHIFT);
+
+ *address_lo = address.lo_address.value;
+}
+
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
struct msi_desc *entry;
- struct msg_address address;
+ uint32_t address_hi, address_lo;
unsigned int irq = vector;
unsigned int dest_cpu = first_cpu(cpu_mask);
@@ -108,28 +125,38 @@
if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
return;
+ pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
+ &address_hi);
pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
- &address.lo_address.value);
- address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
+ &address_lo);
+
+ msi_callouts.msi_target(vector, dest_cpu,
+ &address_hi, &address_lo);
+
+ pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
+ address_hi);
pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
- address.lo_address.value);
+ address_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
case PCI_CAP_ID_MSIX:
{
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
+ int offset_hi =
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
+ int offset_lo =
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
+
+ address_hi = readl(entry->mask_base + offset_hi);
+ address_lo = readl(entry->mask_base + offset_lo);
+
+ msi_callouts.msi_target(vector, dest_cpu,
+ &address_hi, &address_lo);
- address.lo_address.value = readl(entry->mask_base + offset);
- address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
- writel(address.lo_address.value, entry->mask_base + offset);
+ writel(address_hi, entry->mask_base + offset_hi);
+ writel(address_lo, entry->mask_base + offset_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
@@ -249,28 +276,43 @@
.set_affinity = set_msi_irq_affinity
};
-static void msi_data_init(struct msg_data *msi_data,
- unsigned int vector)
-{
- memset(msi_data, 0, sizeof(struct msg_data));
- msi_data->vector = (u8)vector;
- msi_data->delivery_mode = MSI_DELIVERY_MODE;
- msi_data->level = MSI_LEVEL_MODE;
- msi_data->trigger = MSI_TRIGGER_MODE;
-}
-
-static void msi_address_init(struct msg_address *msi_address)
+static int
+msi_setup_generic(struct pci_dev *pdev, /* unused in generic */
+ unsigned int vector,
+ uint32_t *address_hi,
+ uint32_t *address_lo,
+ uint32_t *data)
{
unsigned int dest_id;
unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
+ struct msg_address msi_address;
+ union msg_data msi_data;
- memset(msi_address, 0, sizeof(struct msg_address));
- msi_address->hi_address = (u32)0;
+ memset(&msi_address, 0, sizeof(struct msg_address));
+ msi_address.hi_address = (u32)0;
dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
- msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
- msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
- msi_address->lo_address.u.dest_id = dest_id;
- msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
+ msi_address.lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
+ msi_address.lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
+ msi_address.lo_address.u.dest_id = dest_id;
+ msi_address.lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
+
+ memset(&msi_data, 0, sizeof(union msg_data));
+ msi_data.u.vector = (u8)vector;
+ msi_data.u.delivery_mode = MSI_DELIVERY_MODE;
+ msi_data.u.level = MSI_LEVEL_MODE;
+ msi_data.u.trigger = MSI_TRIGGER_MODE;
+
+ *address_hi = msi_address.hi_address;
+ *address_lo = msi_address.lo_address.value;
+ *data = msi_data.value;
+
+ return 0;
+}
+
+void
+msi_teardown_generic(unsigned int vector)
+{
+ return; /* no-op in most archs */
}
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
@@ -517,9 +559,10 @@
**/
static int msi_capability_init(struct pci_dev *dev)
{
+ int status;
struct msi_desc *entry;
struct msg_address address;
- struct msg_data data;
+ union msg_data data;
int pos, vector;
u16 control;
@@ -546,13 +589,18 @@
entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
is_64bit_address(control));
}
+ /* Configure MSI capability structure */
+ status = msi_callouts.msi_setup(dev, vector,
+ &address.hi_address,
+ &address.lo_address.value,
+ &data.value);
+ if (status < 0) {
+ kmem_cache_free(msi_cachep, entry);
+ return status;
+ }
/* Replace with MSI handler */
irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
- /* Configure MSI capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+
pci_write_config_dword(dev, msi_lower_address_reg(pos),
address.lo_address.value);
if (is_64bit_address(control)) {
@@ -598,12 +646,13 @@
{
struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
struct msg_address address;
- struct msg_data data;
+ union msg_data data;
int vector, pos, i, j, nr_entries, temp = 0;
u32 phys_addr, table_offset;
u16 control;
u8 bir;
void __iomem *base;
+ int status;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
/* Request & Map MSI-X table region */
@@ -650,11 +699,13 @@
/* Replace with MSI-X handler */
irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
/* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu =
- ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ status = msi_callouts.msi_setup(dev, vector,
+ &address.hi_address,
+ &address.lo_address.value,
+ &data.value);
+ if (status < 0)
+ break;
+
writel(address.lo_address.value,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
@@ -796,6 +847,8 @@
void __iomem *base;
unsigned long flags;
+ (*msi_callouts.msi_teardown)(vector);
+
spin_lock_irqsave(&msi_lock, flags);
entry = msi_desc[vector];
if (!entry || entry->dev != dev) {
@@ -1126,6 +1179,26 @@
}
}
+/*
+ * Generic callouts used on most archs/platforms. Override with
+ * msi_register_callouts()
+ */
+
+static struct msi_callouts msi_callouts = {
+ .msi_setup = msi_setup_generic,
+ .msi_teardown = msi_teardown_generic,
+#ifdef CONFIG_SMP
+ .msi_target = msi_target_generic,
+#endif
+};
+
+int
+msi_register_callouts(struct msi_callouts *co)
+{
+ msi_callouts = *co; /* structure copy */
+ return 0;
+}
+
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
Index: msi/drivers/pci/msi.h
===================================================================
--- msi.orig/drivers/pci/msi.h 2005-12-19 15:34:28.428897860 -0600
+++ msi/drivers/pci/msi.h 2005-12-19 16:08:08.081789136 -0600
@@ -84,24 +84,29 @@
#define MSI_LOGICAL_MODE 1
#define MSI_REDIRECTION_HINT_MODE 0
-struct msg_data {
+union msg_data {
+ struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 vector : 8;
- __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
- __u32 reserved_1 : 3;
- __u32 level : 1; /* 0: deassert | 1: assert */
- __u32 trigger : 1; /* 0: edge | 1: level */
- __u32 reserved_2 : 16;
+ __u32 vector : 8;
+ __u32 delivery_mode : 3; /* 000b: FIXED */
+ /* 001b: lowest prior */
+ __u32 reserved_1 : 3;
+ __u32 level : 1; /* 0: deassert | 1: assert */
+ __u32 trigger : 1; /* 0: edge | 1: level */
+ __u32 reserved_2 : 16;
#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 reserved_2 : 16;
- __u32 trigger : 1; /* 0: edge | 1: level */
- __u32 level : 1; /* 0: deassert | 1: assert */
- __u32 reserved_1 : 3;
- __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
- __u32 vector : 8;
+ __u32 reserved_2 : 16;
+ __u32 trigger : 1; /* 0: edge | 1: level */
+ __u32 level : 1; /* 0: deassert | 1: assert */
+ __u32 reserved_1 : 3;
+ __u32 delivery_mode : 3; /* 000b: FIXED */
+ /* 001b: lowest prior */
+ __u32 vector : 8;
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
+ }u;
+ __u32 value;
} __attribute__ ((packed));
struct msg_address {
@@ -138,7 +143,7 @@
__u8 reserved: 1; /* reserved */
__u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */
- __u8 current_cpu; /* current destination cpu */
+ __u8 unused; /* formerly unused destination cpu*/
}msi_attrib;
struct {
Index: msi/include/linux/pci.h
===================================================================
--- msi.orig/include/linux/pci.h 2005-12-19 15:34:28.428897860 -0600
+++ msi/include/linux/pci.h 2005-12-19 18:23:28.984670376 -0600
@@ -478,6 +478,18 @@
u16 entry; /* driver uses to specify entry, OS writes */
};
+struct msi_callouts {
+ int (*msi_setup) (struct pci_dev *pdev,
+ unsigned int vector,
+ uint32_t *addr_hi, uint32_t *addr_lo,
+ uint32_t *data);
+ void (*msi_teardown) (unsigned int vector);
+#ifdef CONFIG_SMP
+ void (*msi_target) (unsigned int vector, unsigned int cpu,
+ uint32_t *addr_hi, uint32_t *addr_lo);
+#endif
+};
+
#ifndef CONFIG_PCI_MSI
static inline void pci_scan_msi_device(struct pci_dev *dev) {}
static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
@@ -486,6 +498,7 @@
struct msix_entry *entries, int nvec) {return -1;}
static inline void pci_disable_msix(struct pci_dev *dev) {}
static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
+static inline int msi_register_callouts(struct msi_callouts *co) {return -1;}
#else
extern void pci_scan_msi_device(struct pci_dev *dev);
extern int pci_enable_msi(struct pci_dev *dev);
@@ -494,6 +507,7 @@
struct msix_entry *entries, int nvec);
extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+extern int msi_register_callouts(struct msi_callouts *co);
#endif
extern void pci_block_user_cfg_access(struct pci_dev *dev);
On Wed, Dec 21, 2005 at 12:42:36PM -0600, Mark Maule wrote:
> Index: msi/include/asm-sparc/msi.h
> ===================================================================
> --- msi.orig/include/asm-sparc/msi.h 2005-12-13 12:22:42.785246074 -0600
> +++ msi/include/asm-sparc/msi.h 2005-12-13 16:09:49.194541334 -0600
> @@ -28,4 +28,6 @@
> "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
> }
>
> +static inline int msi_arch_init(void) { return 0; }
> +
> #endif /* !(_SPARC_MSI_H) */
As far as I can tell, you can't select MSI on Sparc, so this doesn't
need to be here ...
On Wed, Dec 21, 2005 at 12:42:41PM -0600, Mark Maule wrote:
> Abstract portions of the MSI core for platforms that do not use standard
> APIC interrupt controllers. This is implemented through a set of callouts
> which default to current behavior, but which can be overridden by calling
> msi_register_callouts() in the platform msi init code.
we tend to call these _ops or _operations instead of _callouts.
Also I'd suggest to not keep the generic ones where they are but
in a separate file and let the existing platforms call msi_register()
with the ops table for those. This keeps the interface symmetric instead
of favouring the first implementation.
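Something along these lines, perhaps (sketch only; msi_register() and the
*_msi_ops names are illustrative):

struct msi_ops {
	int	(*setup)(struct pci_dev *pdev, unsigned int vector,
			 u32 *addr_hi, u32 *addr_lo, u32 *data);
	void	(*teardown)(unsigned int vector);
#ifdef CONFIG_SMP
	void	(*target)(unsigned int vector, unsigned int cpu,
			  u32 *addr_hi, u32 *addr_lo);
#endif
};

int msi_register(struct msi_ops *ops);

/* apic platforms would register the generic ops from their own init
 * code, the same way sn2 would register sn_msi_ops */
msi_register(&apic_msi_ops);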
> @@ -89,10 +91,25 @@
> }
>
> #ifdef CONFIG_SMP
> +static void msi_target_generic(unsigned int vector,
> + unsigned int dest_cpu,
> + uint32_t *address_hi, /* in/out */
> + uint32_t *address_lo) /* in/out */
Please try to use u32 instead of uint32_t everywhere. Dito for other
sizes and signed types.
> +{
> + struct msg_address address;
> +
> + address.lo_address.value = *address_lo;
> + address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
> + address.lo_address.value |=
> + (cpu_physical_id(dest_cpu) << MSI_TARGET_CPU_SHIFT);
> +
> + *address_lo = address.lo_address.value;
> +}
Why do we need the full struct msg_address here? What about just:
static void msi_target_apic(unsigned int vector, unsigned int dest_cpu,
u32 *address_hi, u32 *address_lo)
{
u32 addr = *address_lo;
addr &= MSI_ADDRESS_DEST_ID_MASK;
addr |= (cpu_physical_id(dest_cpu) << MSI_TARGET_CPU_SHIFT);
*address_lo = addr;
}
> + (*msi_callouts.msi_teardown)(vector);
> +
just
msi_ops.teardown(vector);
> +union msg_data {
> + struct {
> #if defined(__LITTLE_ENDIAN_BITFIELD)
> - __u32 vector : 8;
> - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
> - __u32 reserved_1 : 3;
> - __u32 level : 1; /* 0: deassert | 1: assert */
> - __u32 trigger : 1; /* 0: edge | 1: level */
> - __u32 reserved_2 : 16;
> + __u32 vector : 8;
> + __u32 delivery_mode : 3; /* 000b: FIXED */
> + /* 001b: lowest prior */
> + __u32 reserved_1 : 3;
> + __u32 level : 1; /* 0: deassert | 1: assert */
> + __u32 trigger : 1; /* 0: edge | 1: level */
> + __u32 reserved_2 : 16;
> #elif defined(__BIG_ENDIAN_BITFIELD)
> - __u32 reserved_2 : 16;
> - __u32 trigger : 1; /* 0: edge | 1: level */
> - __u32 level : 1; /* 0: deassert | 1: assert */
> - __u32 reserved_1 : 3;
> - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
> - __u32 vector : 8;
> + __u32 reserved_2 : 16;
> + __u32 trigger : 1; /* 0: edge | 1: level */
> + __u32 level : 1; /* 0: deassert | 1: assert */
> + __u32 reserved_1 : 3;
> + __u32 delivery_mode : 3; /* 000b: FIXED */
> + /* 001b: lowest prior */
> + __u32 vector : 8;
> #else
> #error "Bitfield endianness not defined! Check your byteorder.h"
> #endif
> + }u;
> + __u32 value;
> } __attribute__ ((packed));
While you're cleaning things up, could you please kill the horrible abuse
of bitfields for H/W structures and do proper masking instead.
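e.g. something like this instead of the bitfields (sketch only; these
macro names are made up, not existing definitions):

#define MSI_DATA_VECTOR(v)		(((v) & 0xff) << 0)
#define MSI_DATA_DELIVERY_FIXED		(0 << 8)
#define MSI_DATA_LEVEL_ASSERT		(1 << 14)
#define MSI_DATA_TRIGGER_EDGE		(0 << 15)

u32 data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
	   MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector);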
On Wed, Dec 21, 2005 at 11:53:46AM -0700, Matthew Wilcox wrote:
> On Wed, Dec 21, 2005 at 12:42:36PM -0600, Mark Maule wrote:
> > Index: msi/include/asm-sparc/msi.h
> > ===================================================================
> > --- msi.orig/include/asm-sparc/msi.h 2005-12-13 12:22:42.785246074 -0600
> > +++ msi/include/asm-sparc/msi.h 2005-12-13 16:09:49.194541334 -0600
> > @@ -28,4 +28,6 @@
> > "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
> > }
> >
> > +static inline int msi_arch_init(void) { return 0; }
> > +
> > #endif /* !(_SPARC_MSI_H) */
>
> As far as I can tell, you can't select MSI on Sparc, so this doesn't
> need to be here ...
Ok, I'm a little confused about why we have asm-sparc/msi.h then. Should I
yank it, or leave it for consistency and return -EINVAL so pci_enable_msi()
could fail somewhat gracefully on sparc?
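i.e. the second option being to keep the header but have the stub fail,
roughly:

static inline int msi_arch_init(void)
{
	return -EINVAL;	/* no MSI support on sparc */
}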
Mark
On Wed, Dec 21, 2005 at 12:42:41PM -0600, Mark Maule wrote:
> {
> struct msi_desc *entry;
> - struct msg_address address;
> + uint32_t address_hi, address_lo;
Don't use uint32_t. Use u32 instead.
> @@ -108,28 +125,38 @@
> if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
> return;
>
> + pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
> + &address_hi);
> pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
> - &address.lo_address.value);
> - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
> - address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
> - MSI_TARGET_CPU_SHIFT);
> - entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
> + &address_lo);
> +
> + msi_callouts.msi_target(vector, dest_cpu,
> + &address_hi, &address_lo);
> +
> + pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
> + address_hi);
> pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
> - address.lo_address.value);
> + address_lo);
But actually, I don't understand why you don't just pass a msg_address
pointer to msi_target instead.
(last two points apply throughout this patch)
>
> + (*msi_callouts.msi_teardown)(vector);
> +
Yuck. There's a reason C allows you to call through function pointers as if
they were functions.
> +int
> +msi_register_callouts(struct msi_callouts *co)
> +{
> + msi_callouts = *co; /* structure copy */
> + return 0;
Why do it this way instead of having a pointer to a struct?
> -struct msg_data {
> +union msg_data {
> + struct {
How about leaving struct msg_data alone and adding
union good_name {
struct msg_data;
u32 value;
}
Or possibly struct msg_data should just be deleted and we should use
shift/mask to access the contents of it. ISTR GCC handled that much
better.
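i.e. roughly, with a named member so it stays plain C (the union name is
only a placeholder):

union msi_msg_data {
	struct msg_data	data;
	u32		value;
} __attribute__ ((packed));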
On Wed, Dec 21, 2005 at 12:42:46PM -0600, Mark Maule wrote:
> #ifndef CONFIG_X86_IO_APIC
> int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
> -u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
> +u8 irq_vector[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS - 1 ] = 0 };
Isn't this just a very complicated way of saying:
u8 irq_vector[NR_IRQ_VECTORS];
?
On Wed, Dec 21, 2005 at 06:56:37PM +0000, Christoph Hellwig wrote:
> On Wed, Dec 21, 2005 at 12:42:41PM -0600, Mark Maule wrote:
> > Abstract portions of the MSI core for platforms that do not use standard
> > APIC interrupt controllers. This is implemented through a set of callouts
> > which default to current behavior, but which can be overridden by calling
> > msi_register_callouts() in the platform msi init code.
>
> we tend to call these _ops or _operations instead of _callouts.
> Also I'd suggest not keeping the generic ones where they are but moving them
> into a separate file and letting the existing platforms call msi_register()
> with the ops table for those. This keeps the interface symmetric instead
> of favouring the first implementation.
ok.
>
> > @@ -89,10 +91,25 @@
> > }
> >
> > #ifdef CONFIG_SMP
> > +static void msi_target_generic(unsigned int vector,
> > + unsigned int dest_cpu,
> > + uint32_t *address_hi, /* in/out */
> > + uint32_t *address_lo) /* in/out */
>
> Please try to use u32 instead of uint32_t everywhere. Ditto for other
> sizes and signed types.
ok.
>
> > +{
> > + struct msg_address address;
> > +
> > + address.lo_address.value = *address_lo;
> > + address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
> > + address.lo_address.value |=
> > + (cpu_physical_id(dest_cpu) << MSI_TARGET_CPU_SHIFT);
> > +
> > + *address_lo = address.lo_address.value;
> > +}
>
> Why do we need the full struct msg_address here? What about just:
>
> static void msi_target_apic(unsigned int vector, unsigned int dest_cpu,
> u32 *address_hi, u32 *address_lo)
> {
> u32 addr = *address_lo;
>
> addr &= MSI_ADDRESS_DEST_ID_MASK;
> addr |= (cpu_physical_id(dest_cpu) << MSI_TARGET_CPU_SHIFT);
>
> *address_lo = addr;
> }
Right.
>
> > + (*msi_callouts.msi_teardown)(vector);
> > +
>
> just
> msi_ops.teardown(vector);
>
ok.
On Wed, 21 Dec 2005, Matthew Wilcox wrote:
> On Wed, Dec 21, 2005 at 12:42:41PM -0600, Mark Maule wrote:
>> {
>> struct msi_desc *entry;
>> - struct msg_address address;
>> + uint32_t address_hi, address_lo;
>
> Don't use uint32_t. Use u32 instead.
But uint32_t is a correct POSIX type. Why would you smear its
use in the kernel when, in fact, one is not allowed to mix/match
kernel and user headers so it's impossible to pollute namespace?
>> @@ -108,28 +125,38 @@
>> if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
>> return;
>>
>> + pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
>> + &address_hi);
>> pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
>> - &address.lo_address.value);
>> - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
>> - address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
>> - MSI_TARGET_CPU_SHIFT);
>> - entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
>> + &address_lo);
>> +
>> + msi_callouts.msi_target(vector, dest_cpu,
>> + &address_hi, &address_lo);
>> +
>> + pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
>> + address_hi);
>> pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
>> - address.lo_address.value);
>> + address_lo);
>
> But actually, I don't understand why you don't just pass a msg_address
> pointer to msi_target instead.
>
> (last two points apply throughout this patch)
>
>>
>> + (*msi_callouts.msi_teardown)(vector);
>> +
>
> Yuck. There's a reason C allows you to call through function pointers as if
> they were functions.
The reason is to allow sloppy coding. The patch writer was properly
instructed to dereference function pointers. If you don't like
this "obviously correct" syntax there is no reason to force your
will on others.
Cheers,
Dick Johnson
Penguin : Linux version 2.6.13.4 on an i686 machine (5589.55 BogoMips).
Warning : 98.36% of all statistics are fiction.
On Wed, Dec 21, 2005 at 12:09:16PM -0700, Matthew Wilcox wrote:
> On Wed, Dec 21, 2005 at 12:42:46PM -0600, Mark Maule wrote:
> > #ifndef CONFIG_X86_IO_APIC
> > int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
> > -u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
> > +u8 irq_vector[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS - 1 ] = 0 };
>
> Isn't this just a very complicated way of saying:
>
> u8 irq_vector[NR_IRQ_VECTORS];
>
> ?
Ok. Was just following the lead of this:
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
So arrays are always init'd to zero?
Mark
On Wed, Dec 21, 2005 at 12:05:58PM -0700, Matthew Wilcox wrote:
> On Wed, Dec 21, 2005 at 12:42:41PM -0600, Mark Maule wrote:
>
> > @@ -108,28 +125,38 @@
> > if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
> > return;
> >
> > + pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
> > + &address_hi);
> > pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
> > - &address.lo_address.value);
> > - address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
> > - address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
> > - MSI_TARGET_CPU_SHIFT);
> > - entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
> > + &address_lo);
> > +
> > + msi_callouts.msi_target(vector, dest_cpu,
> > + &address_hi, &address_lo);
> > +
> > + pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
> > + address_hi);
> > pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
> > - address.lo_address.value);
> > + address_lo);
>
> But actually, I don't understand why you don't just pass a msg_address
> pointer to msi_target instead.
Mainly I did it this way 'cause msg_address seems to be geared toward specific
hw (apic?). In the case of altix interrupt hw, we don't know about
dest_mode et al., but only care about the raw address.
I think this style makes it clearer that the core code should only be
using opaque data when interacting with the platform hooks and the MSI
registers.
>
> (last two points apply throughout this patch)
>
> >
> > + (*msi_callouts.msi_teardown)(vector);
> > +
>
> Yuck. There's a reason C allows you to call through function pointers as if
> they were functions.
My bad ... I used the alternate style elsewhere, just botched this one up.
>
> > +int
> > +msi_register_callouts(struct msi_callouts *co)
> > +{
> > + msi_callouts = *co; /* structure copy */
> > + return 0;
>
> Why do it this way instead of having a pointer to a struct?
Are you suggesting to just have:
struct msi_callouts *msi_callouts = (some default value or NULL)
and then have each platform assign msi_callouts in its msi_arch_init?
Doesn't matter to me either way ... I thought having an interface to set
the callouts was cleaner.
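Something like this, presumably (just a sketch; the table names are
placeholders):

static struct msi_callouts *msi_callouts = &msi_apic_callouts;	/* default */

int
msi_register_callouts(struct msi_callouts *co)
{
	msi_callouts = co;
	return 0;
}

with the callers then doing msi_callouts->msi_teardown(vector) etc., or with
each platform assigning the pointer directly from its msi_arch_init() and no
registration function at all.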
>
> > -struct msg_data {
> > +union msg_data {
> > + struct {
>
> How about leaving struct msg_data alone and adding
>
> union good_name {
> struct msg_data;
> u32 value;
> }
>
> Or possibly struct msg_data should just be deleted and we should use
> shift/mask to access the contents of it. ISTR GCC handled that much
> better.
Christoph had similar comments. Will put some thought into it.
Mark
On Wed, Dec 21, 2005 at 01:18:43PM -0600, Mark Maule wrote:
> Ok. Was just following the lead of this:
>
> static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
>
> So arrays are always init'd to zero?
Static variables without an initialiser go to the bss section and get
initialised to 0 by the loader. So the initialisation above is
redundant on all machines which use a bitpattern of zeros to represent
the NULL pointer. Which is all machines Linux runs on.
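I.e. for the line in question, these two declarations have the same effect:

u8 irq_vector[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS - 1] = 0 };
u8 irq_vector[NR_IRQ_VECTORS];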
On Wed, Dec 21, 2005 at 12:42:36PM -0600, Mark Maule wrote:
> Index: msi/include/asm-sparc/msi.h
> ===================================================================
> --- msi.orig/include/asm-sparc/msi.h 2005-12-13 12:22:42.785246074 -0600
> +++ msi/include/asm-sparc/msi.h 2005-12-13 16:09:49.194541334 -0600
> @@ -28,4 +28,6 @@
> "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
> }
>
> +static inline int msi_arch_init(void) { return 0; }
> +
> #endif /* !(_SPARC_MSI_H) */
Ah, look at the header for asm-sparc/msi.h:
* msi.h: Defines specific to the MBus - Sbus - Interface.
Not Message Signalled Interrupts at all ;-)
On Wed, 21 Dec 2005 12:32:20 -0700,
Matthew Wilcox <[email protected]> wrote:
>On Wed, Dec 21, 2005 at 01:18:43PM -0600, Mark Maule wrote:
>> Ok. Was just following the lead of this:
>>
>> static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
>>
>> So arrays are always init'd to zero?
>
>Static variables without an initialiser go to the bss section and get
>initialised to 0 by the loader. So the initialisation above is
>redundant on all machines which use a bitpattern of zeros to represent
>the NULL pointer. Which is all machines Linux runs on.
Semi off-topic nitpick. C99 standard, section 6.7.8, note 10.
"... If an object that has static storage duration is not initialized
explicitly, then:
— if it has pointer type, it is initialized to a null pointer;
— if it has arithmetic type, it is initialized to (positive or
unsigned) zero;
— if it is an aggregate, every member is initialized (recursively)
according to these rules;
— if it is a union, the first named member is initialized
(recursively) according to these rules."
On the off chance that Linux is ever implemented on a machine that does
not use zeroes for a NULL pointer, it would be the compiler's job to
correctly initialise a pointer or array of pointers.
> Mainly I did it this way 'cause msg_address seems to be geared toward specific
> hw (apic?). In the case of altix interrupt hw, we don't know about
> dest_mode et al., but only care about the raw address.
In that case you should probably kill the struct as I suggested and only
keep the shift & mask defines in the file for the apic hw implementation.
On Thu, Dec 22, 2005 at 05:26:12PM +1100, Keith Owens wrote:
> Semi off topic nit pick. C99 standard, section 6.7.8, note 10.
Thanks, Keith. I learned something there.
On Thu, Dec 22, 2005 at 10:36:06AM +0000, Christoph Hellwig wrote:
> > Mainly I did it this way 'cause msg_address seems to be geared toward specific
> > hw (apic?). In the case of altix interrupt hw, we don't know about
> > dest_mode et al., but only care about the raw address.
>
> In that case you should probably kill the struct as I suggested and only
> keep the shift & mask defines in the file for the apic hw implementation.
Yes, that's what I've done (mostly) for the next patch round. Still haven't
killed the struct, but at least it's isolated to apic code now.
Mark