- Add support for Versal CPM as Root Port.
- The Versal ACAP devices include CCIX-PCIe Module (CPM). The integrated
block for CPM along with the integrated bridge can function
as PCIe Root Port.
- Versal CPM uses the GICv3 ITS to assign MSI/MSI-X vectors and to handle
  MSI/MSI-X interrupts.
- Bridge error and legacy interrupts in Versal CPM are handled using the
  Versal CPM-specific MISC interrupt line; a rough sketch of the device
  tree resources the driver expects follows below.
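The fragment below is only an illustration inferred from the driver code
(the "cfg"/"cpm_slcr" reg names, the "misc" interrupt name, the
"xlnx,versal-cpm-host-1.00" compatible and the INTx interrupt-controller
child node). All addresses, interrupt numbers and the msi-map/ITS phandle
are placeholders, not values from the actual binding:

	pcie@fca10000 {					/* placeholder unit address */
		compatible = "xlnx,versal-cpm-host-1.00";
		device_type = "pci";
		#address-cells = <3>;
		#size-cells = <2>;
		#interrupt-cells = <1>;
		bus-range = <0x00 0xff>;
		reg = <0x0 0xfca10000 0x0 0x1000>,	/* placeholder: CPM SLCR */
		      <0x6 0x00000000 0x0 0x1000000>;	/* placeholder: ECAM/bridge */
		reg-names = "cpm_slcr", "cfg";
		ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>;
		interrupts = <0 72 4>;			/* placeholder "misc" IRQ */
		interrupt-names = "misc";
		msi-map = <0x0 &its 0x0 0x10000>;	/* placeholder ITS phandle */
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc 1>,
				<0 0 0 2 &pcie_intc 2>,
				<0 0 0 3 &pcie_intc 3>,
				<0 0 0 4 &pcie_intc 4>;
		pcie_intc: interrupt-controller {
			#address-cells = <0>;
			#interrupt-cells = <1>;
			interrupt-controller;
		};
	};

The driver itself contains no MSI code; MSI/MSI-X routing is expected to
come entirely from the msi-map / GICv3 ITS path.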
Changes v5:
- Removed xilinx_cpm_pcie_valid_device function
Signed-off-by: Bharat Kumar Gogada <[email protected]>
---
drivers/pci/controller/Kconfig | 8 +
drivers/pci/controller/Makefile | 1 +
drivers/pci/controller/pcie-xilinx-cpm.c | 491 +++++++++++++++++++++++++++++++
3 files changed, 500 insertions(+)
create mode 100644 drivers/pci/controller/pcie-xilinx-cpm.c
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index c77069c..362f4db 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -81,6 +81,14 @@ config PCIE_XILINX
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XILINX_CPM
+ bool "Xilinx Versal CPM host bridge support"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ Say 'Y' here if you want kernel support for the
+ Xilinx Versal CPM host bridge. The driver supports
+ MSI/MSI-X interrupts using GICv3 ITS feature.
+
config PCI_XGENE
bool "X-Gene PCIe controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 3d4f597..6c936e9 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
new file mode 100644
index 0000000..4e4c0f0
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
+ *
+ * (C) Copyright 2019 - 2020, Xilinx, Inc.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
+#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
+#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
+#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
+#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
+#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
+#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
+#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
+#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
+#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+
+/* Interrupt registers definitions */
+#define XILINX_CPM_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_CPM_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_CPM_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_CPM_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_CPM_PCIE_INTR_FATAL BIT(11)
+#define XILINX_CPM_PCIE_INTR_INTX BIT(16)
+#define XILINX_CPM_PCIE_INTR_MSI BIT(17)
+#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_CPM_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_CPM_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_CPM_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_CPM_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_CPM_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_CPM_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_CPM_PCIE_IMR_ALL_MASK 0x1FF39FF9
+#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
+#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
+#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON BIT(12)
+#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
+#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD BIT(17)
+#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
+#define XILINX_CPM_PCIE_IDRN_SHIFT 16
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Status/control Register definitions */
+#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/**
+ * struct xilinx_cpm_pcie_port - PCIe port information
+ * @reg_base: Bridge Register Base
+ * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
+ * @irq: Interrupt number
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @irq_misc: Legacy and error interrupt number
+ */
+struct xilinx_cpm_pcie_port {
+ void __iomem *reg_base;
+ void __iomem *cpm_base;
+ u32 irq;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *leg_domain;
+ int irq_misc;
+};
+
+static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
+{
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
+ u32 val, u32 reg)
+{
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
+ XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * cpm_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
+
+ if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_cpm_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_cpm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_cpm_pcie_port *port = bus->sysdata;
+ int relbus;
+
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_cpm_pcie_ops = {
+ .map_bus = xilinx_cpm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_cpm_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+/**
+ * xilinx_cpm_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_cpm_pcie_port *port = data;
+ struct device *dev = port->dev;
+ u32 val, mask, status, bit;
+ unsigned long intr_val;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_CPM_PCIE_INTR_LINK_DOWN)
+ dev_warn(dev, "Link Down\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_HOT_RESET)
+ dev_info(dev, "Hot reset\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(dev, "ECAM access timeout\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_CORRECTABLE) {
+ dev_warn(dev, "Correctable error message\n");
+ cpm_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_CPM_PCIE_INTR_NONFATAL) {
+ dev_warn(dev, "Non fatal error message\n");
+ cpm_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_CPM_PCIE_INTR_FATAL) {
+ dev_warn(dev, "Fatal error message\n");
+ cpm_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_CPM_PCIE_INTR_INTX) {
+ /* Handle INTx Interrupt */
+ intr_val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN);
+ intr_val = intr_val >> XILINX_CPM_PCIE_IDRN_SHIFT;
+
+ for_each_set_bit(bit, &intr_val, PCI_NUM_INTX)
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
+ bit));
+ }
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(dev, "Slave unsupported request\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_UNEXP)
+ dev_warn(dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_COMPL)
+ dev_warn(dev, "Slave completion timeout\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_ERRP)
+ dev_warn(dev, "Slave Error Poison\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_CMPABT)
+ dev_warn(dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_MST_DECERR)
+ dev_warn(dev, "Master decode error\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_MST_SLVERR)
+ dev_warn(dev, "Master slave error\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT)
+ dev_warn(dev, "PCIe ECAM access timeout\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_CFG_ERR_POISON)
+ dev_warn(dev, "ECAM poisoned completion received\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD)
+ dev_warn(dev, "PME_TO_ACK message received\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_PM_PME_RCVD)
+ dev_warn(dev, "PM_PME message received\n");
+
+ if (status & XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT)
+ dev_warn(dev, "PCIe completion timeout received\n");
+
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_CPM_PCIE_REG_IDR);
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
+ * CPM SLCR block.
+ */
+ val = readl(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+ if (val)
+ writel(val, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops,
+ port);
+ if (!port->leg_domain) {
+ dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_cpm_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
+{
+ if (cpm_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
+ XILINX_CPM_PCIE_IMR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ pcie_write(port, XILINX_CPM_PCIE_IMR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IMR);
+ pcie_write(port, XILINX_CPM_PCIE_IDRN_MASK,
+ XILINX_CPM_PCIE_REG_IDRN_MASK);
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
+ * CPM SLCR block.
+ */
+ writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
+ XILINX_CPM_PCIE_REG_RPSC_BEN,
+ XILINX_CPM_PCIE_REG_RPSC);
+}
+
+static int xilinx_cpm_request_misc_irq(struct xilinx_cpm_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ port->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (port->irq_misc <= 0) {
+ dev_err(dev, "Unable to find misc IRQ line\n");
+ return port->irq_misc;
+ }
+ err = devm_request_irq(dev, port->irq_misc,
+ xilinx_cpm_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request misc IRQ line %d\n",
+ port->irq_misc);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_cpm_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ int err;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cpm_slcr");
+ port->cpm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->cpm_base))
+ return PTR_ERR(port->cpm_base);
+
+ err = xilinx_cpm_request_misc_irq(port);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * xilinx_cpm_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_cpm_pcie_port *port;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_cpm_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_cpm_pcie_init_port(port);
+
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = port->root_busno;
+ bridge->ops = &xilinx_cpm_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err)
+ return err;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
+ { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {}
+};
+
+static struct platform_driver xilinx_cpm_pcie_driver = {
+ .driver = {
+ .name = "xilinx-cpm-pcie",
+ .of_match_table = xilinx_cpm_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_cpm_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_cpm_pcie_driver);
--
2.7.4
Hi Bjorn,
Can you please let us know if you have any further comments on this series?
Regards,
Bharat
> -----Original Message-----
> From: Bharat Kumar Gogada <[email protected]>
> Sent: Thursday, January 30, 2020 9:43 PM
> To: [email protected]; [email protected]
> Cc: [email protected]; Ravikiran Gummaluri <[email protected]>;
> Bharat Kumar Gogada <[email protected]>
> Subject: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
Lorenzo and Andrew take care of drivers/pci/controller/*. I'm sure
this is on their radar already but I cc'd them for good measure.
On Thu, Jan 30, 2020 at 09:42:51PM +0530, Bharat Kumar Gogada wrote:
> - Add support for Versal CPM as Root Port.
> - The Versal ACAP devices include CCIX-PCIe Module (CPM). The integrated
> block for CPM along with the integrated bridge can function
> as PCIe Root Port.
> - CPM Versal uses GICv3 ITS feature for achieving assigning MSI/MSI-X
> vectors and handling MSI/MSI-X interrupts.
> - Bridge error and legacy interrupts in Versal CPM are handled using
> Versal CPM specific MISC interrupt line.
>
> Changes v5:
> - Removed xilinx_cpm_pcie_valid_device function
I don't include this sort of history in the commit log because it's
not really of enduring interest. Lorenzo will probably take it out
for you, so no need to repost just for that.
> Signed-off-by: Bharat Kumar Gogada <[email protected]>
> ---
> drivers/pci/controller/Kconfig | 8 +
> drivers/pci/controller/Makefile | 1 +
> drivers/pci/controller/pcie-xilinx-cpm.c | 491 +++++++++++++++++++++++++++++++
> 3 files changed, 500 insertions(+)
> create mode 100644 drivers/pci/controller/pcie-xilinx-cpm.c
>
> diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
> index c77069c..362f4db 100644
> --- a/drivers/pci/controller/Kconfig
> +++ b/drivers/pci/controller/Kconfig
> @@ -81,6 +81,14 @@ config PCIE_XILINX
> Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
> Host Bridge driver.
>
> +config PCIE_XILINX_CPM
I would consider making this PCIE_XILINX_CPM_HOST because many of
these chips can be either a host or an endpoint, and if you add _HOST
now there is room for a future endpoint driver. E.g., see
CONFIG_PCI_DRA7XX_HOST and CONFIG_PCI_DRA7XX_EP.
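Just to illustrate the suggestion (a sketch, not something taken from the
patch), the rename itself would be mechanical:

  config PCIE_XILINX_CPM_HOST
  	bool "Xilinx Versal CPM host bridge support"
  	depends on ARCH_ZYNQMP || COMPILE_TEST
  	help
  	  Say 'Y' here if you want kernel support for the
  	  Xilinx Versal CPM host bridge.

with the Makefile entry updated to match:

  	obj-$(CONFIG_PCIE_XILINX_CPM_HOST) += pcie-xilinx-cpm.o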
> + bool "Xilinx Versal CPM host bridge support"
> + depends on ARCH_ZYNQMP || COMPILE_TEST
> + help
> + Say 'Y' here if you want kernel support for the
> + Xilinx Versal CPM host bridge. The driver supports
> + MSI/MSI-X interrupts using GICv3 ITS feature.
> +
> config PCI_XGENE
> bool "X-Gene PCIe controller"
> depends on ARM64 || COMPILE_TEST
> diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
> index 3d4f597..6c936e9 100644
> --- a/drivers/pci/controller/Makefile
> +++ b/drivers/pci/controller/Makefile
> @@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
> obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
> obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
> obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
> +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
> obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
> obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
> obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
> diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
> new file mode 100644
> index 0000000..4e4c0f0
> --- /dev/null
> +++ b/drivers/pci/controller/pcie-xilinx-cpm.c
> @@ -0,0 +1,491 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
> + *
> + * (C) Copyright 2019 - 2020, Xilinx, Inc.
> + */
> +
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/irqdomain.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/of_address.h>
> +#include <linux/of_pci.h>
> +#include <linux/of_platform.h>
> +#include <linux/of_irq.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +
> +#include "../pci.h"
> +
> +/* Register definitions */
> +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
> +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
> +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
> +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
> +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
> +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
> +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
> +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
> +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
> +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
> +
> +/* Interrupt registers definitions */
> +#define XILINX_CPM_PCIE_INTR_LINK_DOWN BIT(0)
> +#define XILINX_CPM_PCIE_INTR_HOT_RESET BIT(3)
> +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT BIT(8)
> +#define XILINX_CPM_PCIE_INTR_CORRECTABLE BIT(9)
> +#define XILINX_CPM_PCIE_INTR_NONFATAL BIT(10)
> +#define XILINX_CPM_PCIE_INTR_FATAL BIT(11)
> +#define XILINX_CPM_PCIE_INTR_INTX BIT(16)
> +#define XILINX_CPM_PCIE_INTR_MSI BIT(17)
> +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP BIT(20)
> +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP BIT(21)
> +#define XILINX_CPM_PCIE_INTR_SLV_COMPL BIT(22)
> +#define XILINX_CPM_PCIE_INTR_SLV_ERRP BIT(23)
> +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT BIT(24)
> +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR BIT(25)
> +#define XILINX_CPM_PCIE_INTR_MST_DECERR BIT(26)
> +#define XILINX_CPM_PCIE_INTR_MST_SLVERR BIT(27)
> +#define XILINX_CPM_PCIE_IMR_ALL_MASK 0x1FF39FF9
> +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
> +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
> +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
> +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON BIT(12)
> +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
> +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD BIT(17)
> +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
> +#define XILINX_CPM_PCIE_IDRN_SHIFT 16
> +
> +/* Root Port Error FIFO Read Register definitions */
> +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
> +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
> +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
> +
> +/* Root Port Status/control Register definitions */
> +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
> +
> +/* Phy Status/Control Register definitions */
> +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
> +
> +/* ECAM definitions */
> +#define ECAM_BUS_NUM_SHIFT 20
> +#define ECAM_DEV_NUM_SHIFT 12
> +
> +/**
> + * struct xilinx_cpm_pcie_port - PCIe port information
> + * @reg_base: Bridge Register Base
> + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
> + * @irq: Interrupt number
> + * @root_busno: Root Bus number
> + * @dev: Device pointer
> + * @leg_domain: Legacy IRQ domain pointer
> + * @irq_misc: Legacy and error interrupt number
> + */
> +struct xilinx_cpm_pcie_port {
> + void __iomem *reg_base;
> + void __iomem *cpm_base;
> + u32 irq;
> + u8 root_busno;
> + struct device *dev;
> + struct irq_domain *leg_domain;
> + int irq_misc;
> +};
> +
> +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
> +{
> + return readl(port->reg_base + reg);
> +}
> +
> +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> + u32 val, u32 reg)
> +{
> + writel(val, port->reg_base + reg);
> +}
> +
> +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
> +{
> + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
> +}
> +
> +/**
> + * xilinx_cpm_pcie_clear_err_interrupts - Clear Error Interrupts
> + * @port: PCIe port information
> + */
> +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
> +{
> + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
> +
> + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
> + dev_dbg(port->dev, "Requester ID %lu\n",
> + val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
> + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
> + XILINX_CPM_PCIE_REG_RPEFR);
> + }
> +}
> +
> +/**
> + * xilinx_cpm_pcie_map_bus - Get configuration base
> + * @bus: PCI Bus structure
> + * @devfn: Device/function
> + * @where: Offset from base
> + *
> + * Return: Base address of the configuration space needed to be
> + * accessed.
> + */
> +static void __iomem *xilinx_cpm_pcie_map_bus(struct pci_bus *bus,
> + unsigned int devfn, int where)
> +{
> + struct xilinx_cpm_pcie_port *port = bus->sysdata;
> + int relbus;
> +
> + relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
> + (devfn << ECAM_DEV_NUM_SHIFT);
> +
> + return port->reg_base + relbus + where;
I *think* this is exactly pci_ecam_map_bus(), so maybe you could just
use pci_generic_ecam_ops instead of defining xilinx_cpm_pcie_ops? If
so, congratulations, you've achieved what seems to be almost
impossible.
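For reference, a stripped-down illustration of that arithmetic (ecam_addr()
is a made-up name, not the actual pci_ecam_map_bus() source): the standard
ECAM layout with a 1 MiB per-bus window uses exactly the same shifts as
ECAM_BUS_NUM_SHIFT (20) and ECAM_DEV_NUM_SHIFT (12) above:

	static void __iomem *ecam_addr(void __iomem *win, unsigned int busnr,
				       unsigned int devfn, int where)
	{
		/* bus << 20 | devfn << 12 | where, relative to the config window */
		return win + (busnr << 20) + (devfn << 12) + where;
	}

If the generic ops do fit, the driver could also drop its local ECAM_*
shift macros.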
> +}
Thanks Bjorn.
Hi Lorenzo and Andrew, please let me know if you have any further queries.
> -----Original Message-----
> From: Bjorn Helgaas <[email protected]>
> Sent: Tuesday, February 18, 2020 5:26 AM
> To: Bharat Kumar Gogada <[email protected]>
> Cc: [email protected]; [email protected]; Ravikiran
> Gummaluri <[email protected]>; [email protected]; Andrew Murray
> <[email protected]>
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> Lorenzo and Andrew take care of drivers/pci/controllers/*. I'm sure this is on
> their radar already but I cc'd them for good measure.
>
> On Thu, Jan 30, 2020 at 09:42:51PM +0530, Bharat Kumar Gogada wrote:
> > - Add support for Versal CPM as Root Port.
> > - The Versal ACAP devices include CCIX-PCIe Module (CPM). The integrated
> > block for CPM along with the integrated bridge can function
> > as PCIe Root Port.
> > - CPM Versal uses GICv3 ITS feature for achieving assigning MSI/MSI-X
> > vectors and handling MSI/MSI-X interrupts.
> > - Bridge error and legacy interrupts in Versal CPM are handled using
> > Versal CPM specific MISC interrupt line.
> >
> > Changes v5:
> > - Removed xilinx_cpm_pcie_valid_device function
>
> I don't include this sort of history in the commit log because it's not really of
> enduring interest. Lorenzo will probably take it out for you, so no need to
> repost just for that.
>
> > Signed-off-by: Bharat Kumar Gogada <[email protected]>
> > ---
> > drivers/pci/controller/Kconfig | 8 +
> > drivers/pci/controller/Makefile | 1 +
> > drivers/pci/controller/pcie-xilinx-cpm.c | 491
> > +++++++++++++++++++++++++++++++
> > 3 files changed, 500 insertions(+)
> > create mode 100644 drivers/pci/controller/pcie-xilinx-cpm.c
> >
> > diff --git a/drivers/pci/controller/Kconfig
> > b/drivers/pci/controller/Kconfig index c77069c..362f4db 100644
> > --- a/drivers/pci/controller/Kconfig
> > +++ b/drivers/pci/controller/Kconfig
> > @@ -81,6 +81,14 @@ config PCIE_XILINX
> > Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
> > Host Bridge driver.
> >
> > +config PCIE_XILINX_CPM
>
> I would consider making this PCIE_XILINX_CPM_HOST because many of these
> chips can be either a host or an endpoint, and if you add _HOST now there is
> room for a future endpoint driver. E.g., see CONFIG_PCI_DRA7XX_HOST and
> CONFIG_PCI_DRA7XX_EP.
>
> > + bool "Xilinx Versal CPM host bridge support"
> > + depends on ARCH_ZYNQMP || COMPILE_TEST
> > + help
> > + Say 'Y' here if you want kernel support for the
> > + Xilinx Versal CPM host bridge. The driver supports
> > + MSI/MSI-X interrupts using GICv3 ITS feature.
> > +
> > config PCI_XGENE
> > bool "X-Gene PCIe controller"
> > depends on ARM64 || COMPILE_TEST
> > diff --git a/drivers/pci/controller/Makefile
> > b/drivers/pci/controller/Makefile index 3d4f597..6c936e9 100644
> > --- a/drivers/pci/controller/Makefile
> > +++ b/drivers/pci/controller/Makefile
> > @@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-
> common.o
> > obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
> > obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
> > obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
> > +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
> > obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
> > obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
> > obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o diff --git
> > a/drivers/pci/controller/pcie-xilinx-cpm.c
> > b/drivers/pci/controller/pcie-xilinx-cpm.c
> > new file mode 100644
> > index 0000000..4e4c0f0
> > --- /dev/null
> > +++ b/drivers/pci/controller/pcie-xilinx-cpm.c
> > @@ -0,0 +1,491 @@
> > +// SPDX-License-Identifier: GPL-2.0+
> > +/*
> > + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
> > + *
> > + * (C) Copyright 2019 - 2020, Xilinx, Inc.
> > + */
> > +
> > +#include <linux/interrupt.h>
> > +#include <linux/irq.h>
> > +#include <linux/irqdomain.h>
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/of_address.h>
> > +#include <linux/of_pci.h>
> > +#include <linux/of_platform.h>
> > +#include <linux/of_irq.h>
> > +#include <linux/pci.h>
> > +#include <linux/platform_device.h>
> > +
> > +#include "../pci.h"
> > +
> > +/* Register definitions */
> > +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
> > +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
> > +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
> > +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
> > +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
> > +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
> > +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
> > +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
> > +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
> > +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
> > +
> > +/* Interrupt registers definitions */
> > +#define XILINX_CPM_PCIE_INTR_LINK_DOWN BIT(0)
> > +#define XILINX_CPM_PCIE_INTR_HOT_RESET BIT(3)
> > +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT BIT(8)
> > +#define XILINX_CPM_PCIE_INTR_CORRECTABLE BIT(9)
> > +#define XILINX_CPM_PCIE_INTR_NONFATAL BIT(10)
> > +#define XILINX_CPM_PCIE_INTR_FATAL BIT(11)
> > +#define XILINX_CPM_PCIE_INTR_INTX BIT(16)
> > +#define XILINX_CPM_PCIE_INTR_MSI BIT(17)
> > +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP BIT(20)
> > +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP BIT(21)
> > +#define XILINX_CPM_PCIE_INTR_SLV_COMPL BIT(22)
> > +#define XILINX_CPM_PCIE_INTR_SLV_ERRP BIT(23)
> > +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT BIT(24)
> > +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR BIT(25)
> > +#define XILINX_CPM_PCIE_INTR_MST_DECERR BIT(26)
> > +#define XILINX_CPM_PCIE_INTR_MST_SLVERR BIT(27)
> > +#define XILINX_CPM_PCIE_IMR_ALL_MASK 0x1FF39FF9
> > +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
> > +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
> > +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
> > +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON BIT(12)
> > +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
> > +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD BIT(17)
> > +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
> > +#define XILINX_CPM_PCIE_IDRN_SHIFT 16
> > +
> > +/* Root Port Error FIFO Read Register definitions */
> > +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
> > +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
> > +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
> > +
> > +/* Root Port Status/control Register definitions */
> > +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
> > +
> > +/* Phy Status/Control Register definitions */
> > +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
> > +
> > +/* ECAM definitions */
> > +#define ECAM_BUS_NUM_SHIFT 20
> > +#define ECAM_DEV_NUM_SHIFT 12
> > +
> > +/**
> > + * struct xilinx_cpm_pcie_port - PCIe port information
> > + * @reg_base: Bridge Register Base
> > + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
> > + * @irq: Interrupt number
> > + * @root_busno: Root Bus number
> > + * @dev: Device pointer
> > + * @leg_domain: Legacy IRQ domain pointer
> > + * @irq_misc: Legacy and error interrupt number
> > + */
> > +struct xilinx_cpm_pcie_port {
> > + void __iomem *reg_base;
> > + void __iomem *cpm_base;
> > + u32 irq;
> > + u8 root_busno;
> > + struct device *dev;
> > + struct irq_domain *leg_domain;
> > + int irq_misc;
> > +};
> > +
> > +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
> > +{
> > + return readl(port->reg_base + reg);
> > +}
> > +
> > +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> > + u32 val, u32 reg)
> > +{
> > + writel(val, port->reg_base + reg);
> > +}
> > +
> > +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
> > +{
> > + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> > + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_clear_err_interrupts - Clear Error Interrupts
> > + * @port: PCIe port information
> > + */
> > +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
> > +{
> > + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
> > +
> > + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
> > + dev_dbg(port->dev, "Requester ID %lu\n",
> > + val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
> > + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_RPEFR);
> > + }
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_map_bus - Get configuration base
> > + * @bus: PCI Bus structure
> > + * @devfn: Device/function
> > + * @where: Offset from base
> > + *
> > + * Return: Base address of the configuration space needed to be
> > + * accessed.
> > + */
> > +static void __iomem *xilinx_cpm_pcie_map_bus(struct pci_bus *bus,
> > + unsigned int devfn, int where)
> > +{
> > + struct xilinx_cpm_pcie_port *port = bus->sysdata;
> > + int relbus;
> > +
> > + relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
> > + (devfn << ECAM_DEV_NUM_SHIFT);
> > +
> > + return port->reg_base + relbus + where;
>
> I *think* this is exactly pci_ecam_map_bus(), so maybe you could just use
> pci_generic_ecam_ops instead of defining xilinx_cpm_pcie_ops? If so,
> congratulations, you've achieved what seems to be almost impossible.
>
> > +}
> > +
> > +/* PCIe operations */
> > +static struct pci_ops xilinx_cpm_pcie_ops = {
> > + .map_bus = xilinx_cpm_pcie_map_bus,
> > + .read = pci_generic_config_read,
> > + .write = pci_generic_config_write,
> > +};
> > +
> > +/**
> > + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark
> > +IRQ as valid
> > + * @domain: IRQ domain
> > + * @irq: Virtual IRQ number
> > + * @hwirq: HW interrupt number
> > + *
> > + * Return: Always returns 0.
> > + */
> > +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> > + unsigned int irq, irq_hw_number_t hwirq)
> > +{
> > + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
> > + irq_set_chip_data(irq, domain->host_data);
> > + irq_set_status_flags(irq, IRQ_LEVEL);
> > +
> > + return 0;
> > +}
> > +
> > +/* INTx IRQ Domain operations */
> > +static const struct irq_domain_ops intx_domain_ops = {
> > + .map = xilinx_cpm_pcie_intx_map,
> > + .xlate = pci_irqd_intx_xlate,
> > +};
> > +
> > +/**
> > + * xilinx_cpm_pcie_intr_handler - Interrupt Service Handler
> > + * @irq: IRQ number
> > + * @data: PCIe port information
> > + *
> > + * Return: IRQ_HANDLED on success and IRQ_NONE on failure
> > + */
> > +static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *data)
> > +{
> > + struct xilinx_cpm_pcie_port *port = data;
> > + struct device *dev = port->dev;
> > + u32 val, mask, status, bit;
> > + unsigned long intr_val;
> > +
> > + /* Read interrupt decode and mask registers */
> > + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
> > + mask = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
> > +
> > + status = val & mask;
> > + if (!status)
> > + return IRQ_NONE;
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_LINK_DOWN)
> > + dev_warn(dev, "Link Down\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_HOT_RESET)
> > + dev_info(dev, "Hot reset\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_TIMEOUT)
> > + dev_warn(dev, "ECAM access timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CORRECTABLE) {
> > + dev_warn(dev, "Correctable error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_NONFATAL) {
> > + dev_warn(dev, "Non fatal error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_FATAL) {
> > + dev_warn(dev, "Fatal error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_INTX) {
> > + /* Handle INTx Interrupt */
> > + intr_val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN);
> > + intr_val = intr_val >> XILINX_CPM_PCIE_IDRN_SHIFT;
> > +
> > + for_each_set_bit(bit, &intr_val, PCI_NUM_INTX)
> > + generic_handle_irq(irq_find_mapping(port->leg_domain,
> > + bit));
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_UNSUPP)
> > + dev_warn(dev, "Slave unsupported request\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_UNEXP)
> > + dev_warn(dev, "Slave unexpected completion\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_COMPL)
> > + dev_warn(dev, "Slave completion timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_ERRP)
> > + dev_warn(dev, "Slave Error Poison\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_CMPABT)
> > + dev_warn(dev, "Slave Completer Abort\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_ILLBUR)
> > + dev_warn(dev, "Slave Illegal Burst\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_MST_DECERR)
> > + dev_warn(dev, "Master decode error\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_MST_SLVERR)
> > + dev_warn(dev, "Master slave error\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT)
> > + dev_warn(dev, "PCIe ECAM access timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_ERR_POISON)
> > + dev_warn(dev, "ECAM poisoned completion received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD)
> > + dev_warn(dev, "PME_TO_ACK message received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_PM_PME_RCVD)
> > + dev_warn(dev, "PM_PME message received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT)
> > + dev_warn(dev, "PCIe completion timeout received\n");
> > +
> > + /* Clear the Interrupt Decode register */
> > + pcie_write(port, status, XILINX_CPM_PCIE_REG_IDR);
> > +
> > + /*
> > + * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
> > + * CPM SLCR block.
> > + */
> > + val = readl(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> > + if (val)
> > + writel(val, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> > +
> > + return IRQ_HANDLED;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
> > + * @port: PCIe port information
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct device_node *node = dev->of_node;
> > + struct device_node *pcie_intc_node;
> > +
> > + /* Setup INTx */
> > + pcie_intc_node = of_get_next_child(node, NULL);
> > + if (!pcie_intc_node) {
> > + dev_err(dev, "No PCIe Intc node found\n");
> > + return -EINVAL;
> > + }
> > +
> > + port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
> > + &intx_domain_ops,
> > + port);
> > + if (!port->leg_domain) {
> > + dev_err(dev, "Failed to get a INTx IRQ domain\n");
> > + return -ENOMEM;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_init_port - Initialize hardware
> > + * @port: PCIe port information
> > + */
> > +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
> > +{
> > + if (cpm_pcie_link_up(port))
> > + dev_info(port->dev, "PCIe Link is UP\n");
> > + else
> > + dev_info(port->dev, "PCIe Link is DOWN\n");
> > +
> > + /* Disable all interrupts */
> > + pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IMR);
> > +
> > + /* Clear pending interrupts */
> > + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
> > + XILINX_CPM_PCIE_IMR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IDR);
> > +
> > + /* Enable all interrupts */
> > + pcie_write(port, XILINX_CPM_PCIE_IMR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IMR);
> > + pcie_write(port, XILINX_CPM_PCIE_IDRN_MASK,
> > + XILINX_CPM_PCIE_REG_IDRN_MASK);
> > +
> > + /*
> > + * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
> > + * CPM SLCR block.
> > + */
> > + writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
> > + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
> > + /* Enable the Bridge enable bit */
> > + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
> > + XILINX_CPM_PCIE_REG_RPSC_BEN,
> > + XILINX_CPM_PCIE_REG_RPSC);
> > +}
> > +
> > +static int xilinx_cpm_request_misc_irq(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct platform_device *pdev = to_platform_device(dev);
> > + int err;
> > +
> > + port->irq_misc = platform_get_irq_byname(pdev, "misc");
> > + if (port->irq_misc <= 0) {
> > + dev_err(dev, "Unable to find misc IRQ line\n");
> > + return port->irq_misc;
> > + }
> > + err = devm_request_irq(dev, port->irq_misc,
> > + xilinx_cpm_pcie_intr_handler,
> > + IRQF_SHARED | IRQF_NO_THREAD,
> > + "xilinx-pcie", port);
> > + if (err) {
> > + dev_err(dev, "unable to request misc IRQ line %d\n",
> > + port->irq_misc);
> > + return err;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_parse_dt - Parse Device tree
> > + * @port: PCIe port information
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct platform_device *pdev = to_platform_device(dev);
> > + struct resource *res;
> > + int err;
> > +
> > + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
> > + port->reg_base = devm_ioremap_resource(dev, res);
> > + if (IS_ERR(port->reg_base))
> > + return PTR_ERR(port->reg_base);
> > +
> > + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
> > + "cpm_slcr");
> > + port->cpm_base = devm_ioremap_resource(dev, res);
> > + if (IS_ERR(port->cpm_base))
> > + return PTR_ERR(port->cpm_base);
> > +
> > + err = xilinx_cpm_request_misc_irq(port);
> > + if (err)
> > + return err;
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_probe - Probe function
> > + * @pdev: Platform device pointer
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
> > +{
> > + struct xilinx_cpm_pcie_port *port;
> > + struct device *dev = &pdev->dev;
> > + struct pci_bus *bus;
> > + struct pci_bus *child;
> > + struct pci_host_bridge *bridge;
> > + int err;
> > +
> > + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
> > + if (!bridge)
> > + return -ENODEV;
> > +
> > + port = pci_host_bridge_priv(bridge);
> > +
> > + port->dev = dev;
> > +
> > + err = xilinx_cpm_pcie_parse_dt(port);
> > + if (err) {
> > + dev_err(dev, "Parsing DT failed\n");
> > + return err;
> > + }
> > +
> > + xilinx_cpm_pcie_init_port(port);
> > +
> > + err = xilinx_cpm_pcie_init_irq_domain(port);
> > + if (err) {
> > + dev_err(dev, "Failed creating IRQ Domain\n");
> > + return err;
> > + }
> > +
> > + err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
> > + &bridge->dma_ranges, NULL);
> > + if (err) {
> > + dev_err(dev, "Getting bridge resources failed\n");
> > + return err;
> > + }
> > +
> > + bridge->dev.parent = dev;
> > + bridge->sysdata = port;
> > + bridge->busnr = port->root_busno;
> > + bridge->ops = &xilinx_cpm_pcie_ops;
> > + bridge->map_irq = of_irq_parse_and_map_pci;
> > + bridge->swizzle_irq = pci_common_swizzle;
> > +
> > + err = pci_scan_root_bus_bridge(bridge);
> > + if (err)
> > + return err;
> > +
> > + bus = bridge->bus;
> > +
> > + pci_assign_unassigned_bus_resources(bus);
> > + list_for_each_entry(child, &bus->children, node)
> > + pcie_bus_configure_settings(child);
> > + pci_bus_add_devices(bus);
> > + return 0;
> > +}
> > +
> > +static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
> > + { .compatible = "xlnx,versal-cpm-host-1.00", },
> > + {}
> > +};
> > +
> > +static struct platform_driver xilinx_cpm_pcie_driver = {
> > + .driver = {
> > + .name = "xilinx-cpm-pcie",
> > + .of_match_table = xilinx_cpm_pcie_of_match,
> > + .suppress_bind_attrs = true,
> > + },
> > + .probe = xilinx_cpm_pcie_probe,
> > +};
> > +
> > +builtin_platform_driver(xilinx_cpm_pcie_driver);
> > --
> > 2.7.4
> >
On Thu, Jan 30, 2020 at 09:42:51PM +0530, Bharat Kumar Gogada wrote:
> - Add support for Versal CPM as Root Port.
> - The Versal ACAP devices include CCIX-PCIe Module (CPM). The integrated
> block for CPM along with the integrated bridge can function
> as PCIe Root Port.
> - CPM Versal uses GICv3 ITS feature for achieving assigning MSI/MSI-X
> vectors and handling MSI/MSI-X interrupts.
This is not relevant information.
> - Bridge error and legacy interrupts in Versal CPM are handled using
> Versal CPM specific MISC interrupt line.
>
> Changes v5:
> - Removed xilinx_cpm_pcie_valid_device function
Remove Changes log from the commit log.
> Signed-off-by: Bharat Kumar Gogada <[email protected]>
> ---
> drivers/pci/controller/Kconfig | 8 +
> drivers/pci/controller/Makefile | 1 +
> drivers/pci/controller/pcie-xilinx-cpm.c | 491 +++++++++++++++++++++++++++++++
> 3 files changed, 500 insertions(+)
> create mode 100644 drivers/pci/controller/pcie-xilinx-cpm.c
>
> diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
> index c77069c..362f4db 100644
> --- a/drivers/pci/controller/Kconfig
> +++ b/drivers/pci/controller/Kconfig
> @@ -81,6 +81,14 @@ config PCIE_XILINX
> Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
> Host Bridge driver.
>
> +config PCIE_XILINX_CPM
> + bool "Xilinx Versal CPM host bridge support"
> + depends on ARCH_ZYNQMP || COMPILE_TEST
> + help
> + Say 'Y' here if you want kernel support for the
> + Xilinx Versal CPM host bridge. The driver supports
> + MSI/MSI-X interrupts using GICv3 ITS feature.
> +
> config PCI_XGENE
> bool "X-Gene PCIe controller"
> depends on ARM64 || COMPILE_TEST
> diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
> index 3d4f597..6c936e9 100644
> --- a/drivers/pci/controller/Makefile
> +++ b/drivers/pci/controller/Makefile
> @@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
> obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
> obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
> obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
> +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
> obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
> obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
> obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
> diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
> new file mode 100644
> index 0000000..4e4c0f0
> --- /dev/null
> +++ b/drivers/pci/controller/pcie-xilinx-cpm.c
> @@ -0,0 +1,491 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
> + *
> + * (C) Copyright 2019 - 2020, Xilinx, Inc.
> + */
> +
> +#include <linux/interrupt.h>
> +#include <linux/irq.h>
> +#include <linux/irqdomain.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/of_address.h>
> +#include <linux/of_pci.h>
> +#include <linux/of_platform.h>
> +#include <linux/of_irq.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +
> +#include "../pci.h"
> +
> +/* Register definitions */
> +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
> +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
> +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
> +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
> +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
> +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
> +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
> +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
> +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
> +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
> +
> +/* Interrupt registers definitions */
> +#define XILINX_CPM_PCIE_INTR_LINK_DOWN BIT(0)
> +#define XILINX_CPM_PCIE_INTR_HOT_RESET BIT(3)
> +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT BIT(8)
> +#define XILINX_CPM_PCIE_INTR_CORRECTABLE BIT(9)
> +#define XILINX_CPM_PCIE_INTR_NONFATAL BIT(10)
> +#define XILINX_CPM_PCIE_INTR_FATAL BIT(11)
> +#define XILINX_CPM_PCIE_INTR_INTX BIT(16)
> +#define XILINX_CPM_PCIE_INTR_MSI BIT(17)
> +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP BIT(20)
> +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP BIT(21)
> +#define XILINX_CPM_PCIE_INTR_SLV_COMPL BIT(22)
> +#define XILINX_CPM_PCIE_INTR_SLV_ERRP BIT(23)
> +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT BIT(24)
> +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR BIT(25)
> +#define XILINX_CPM_PCIE_INTR_MST_DECERR BIT(26)
> +#define XILINX_CPM_PCIE_INTR_MST_SLVERR BIT(27)
> +#define XILINX_CPM_PCIE_IMR_ALL_MASK 0x1FF39FF9
> +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
> +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
> +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
> +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON BIT(12)
> +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
> +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD BIT(17)
> +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
> +#define XILINX_CPM_PCIE_IDRN_SHIFT 16
> +
> +/* Root Port Error FIFO Read Register definitions */
> +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
> +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
> +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
> +
> +/* Root Port Status/control Register definitions */
> +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
> +
> +/* Phy Status/Control Register definitions */
> +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
> +
> +/* ECAM definitions */
> +#define ECAM_BUS_NUM_SHIFT 20
> +#define ECAM_DEV_NUM_SHIFT 12
You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
> +
> +/**
> + * struct xilinx_cpm_pcie_port - PCIe port information
> + * @reg_base: Bridge Register Base
> + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
> + * @irq: Interrupt number
> + * @root_busno: Root Bus number
> + * @dev: Device pointer
> + * @leg_domain: Legacy IRQ domain pointer
> + * @irq_misc: Legacy and error interrupt number
> + */
> +struct xilinx_cpm_pcie_port {
> + void __iomem *reg_base;
> + void __iomem *cpm_base;
> + u32 irq;
> + u8 root_busno;
> + struct device *dev;
> + struct irq_domain *leg_domain;
> + int irq_misc;
> +};
> +
> +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
> +{
> + return readl(port->reg_base + reg);
> +}
> +
> +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> + u32 val, u32 reg)
> +{
> + writel(val, port->reg_base + reg);
> +}
> +
> +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
> +{
> + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);
return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
And this function call is not that informative anyway - it is used just
to print a log whose usefulness is questionable.
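For reference, a minimal sketch of the simplified helper along those lines
(reusing the patch's pcie_read() wrapper and register definitions; illustrative
only):

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
{
	u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);

	/* non-zero means the PHY reports the link as up */
	return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
}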
> +}
> +
> +/**
> + * xilinx_cpm_pcie_clear_err_interrupts - Clear Error Interrupts
> + * @port: PCIe port information
> + */
> +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
> +{
> + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
> +
> + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
> + dev_dbg(port->dev, "Requester ID %lu\n",
> + val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
> + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
> + XILINX_CPM_PCIE_REG_RPEFR);
> + }
> +}
> +
> +/**
> + * xilinx_cpm_pcie_map_bus - Get configuration base
> + * @bus: PCI Bus structure
> + * @devfn: Device/function
> + * @where: Offset from base
> + *
> + * Return: Base address of the configuration space needed to be
> + * accessed.
> + */
> +static void __iomem *xilinx_cpm_pcie_map_bus(struct pci_bus *bus,
> + unsigned int devfn, int where)
> +{
> + struct xilinx_cpm_pcie_port *port = bus->sysdata;
> + int relbus;
> +
> + relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
> + (devfn << ECAM_DEV_NUM_SHIFT);
> +
> + return port->reg_base + relbus + where;
> +}
You don't need this function, you can rely on pci_generic_ecam_ops.
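As a rough illustration of what that could look like (an assumption about the
conversion, not code from this patch): create a generic ECAM window from the
"cfg" resource and let the core accessors do the rest. pci_ecam_create() and
pci_generic_ecam_ops come from <linux/pci-ecam.h>; "bus_range" below is a
stand-in for the bus resource parsed from DT.

	struct pci_config_window *cfg;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	cfg = pci_ecam_create(dev, res, bus_range, &pci_generic_ecam_ops);
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* config accesses now go through the generic ECAM map_bus/read/write */
	bridge->sysdata = cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;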
> +/* PCIe operations */
> +static struct pci_ops xilinx_cpm_pcie_ops = {
> + .map_bus = xilinx_cpm_pcie_map_bus,
> + .read = pci_generic_config_read,
> + .write = pci_generic_config_write,
> +};
See above.
> +
> +/**
> + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
> + * @domain: IRQ domain
> + * @irq: Virtual IRQ number
> + * @hwirq: HW interrupt number
> + *
> + * Return: Always returns 0.
> + */
> +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> + unsigned int irq, irq_hw_number_t hwirq)
> +{
> + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
INTX are level IRQs, the flow handler must be handle_level_irq.
> + irq_set_chip_data(irq, domain->host_data);
> + irq_set_status_flags(irq, IRQ_LEVEL);
The way INTX are handled in this patch is wrong. You must set up
a chained IRQ with the appropriate flow handler; the current code
uses an IRQ action, which is an IRQ layer violation, and it goes
without saying that it is almost certainly broken.
Please read drivers/pci/controller/pci-ft100.c code, that's the
way INTX must be handled; I planned to take that code and make
it a library, please use the same IRQ domain set-up.
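To make that concrete, a minimal sketch (illustrative only, not the referenced
driver's exact scheme; the flow-handler name is made up here) of a level-type
INTx map plus a chained handler on the misc line:

/* INTx map callback: level interrupts want handle_level_irq */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/*
 * Hypothetical chained flow handler replacing the IRQ action; needs
 * <linux/irqchip/chained_irq.h> for chained_irq_enter()/exit().
 */
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN);
	val >>= XILINX_CPM_PCIE_IDRN_SHIFT;
	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_irq(irq_find_mapping(port->leg_domain, i));
	chained_irq_exit(chip, desc);
}

/* In the setup path, instead of devm_request_irq(): */
	irq_set_chained_handler_and_data(port->irq_misc,
					 xilinx_cpm_pcie_event_flow, port);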
> + return 0;
> +}
> +
> +/* INTx IRQ Domain operations */
> +static const struct irq_domain_ops intx_domain_ops = {
> + .map = xilinx_cpm_pcie_intx_map,
> + .xlate = pci_irqd_intx_xlate,
This is wrong, wrong, wrong. There is no need for xlat'ing anything;
see my reply on the DT bindings.
> +};
> +
> +/**
> + * xilinx_cpm_pcie_intr_handler - Interrupt Service Handler
> + * @irq: IRQ number
> + * @data: PCIe port information
> + *
> + * Return: IRQ_HANDLED on success and IRQ_NONE on failure
> + */
> +static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *data)
Bad. This must be a chained IRQ flow handler, not an IRQ action.
> +{
> + struct xilinx_cpm_pcie_port *port = data;
> + struct device *dev = port->dev;
> + u32 val, mask, status, bit;
> + unsigned long intr_val;
> +
> + /* Read interrupt decode and mask registers */
> + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
> + mask = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
> +
> + status = val & mask;
> + if (!status)
> + return IRQ_NONE;
> +
> + if (status & XILINX_CPM_PCIE_INTR_LINK_DOWN)
> + dev_warn(dev, "Link Down\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_HOT_RESET)
> + dev_info(dev, "Hot reset\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_CFG_TIMEOUT)
> + dev_warn(dev, "ECAM access timeout\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_CORRECTABLE) {
> + dev_warn(dev, "Correctable error message\n");
> + cpm_pcie_clear_err_interrupts(port);
> + }
> +
> + if (status & XILINX_CPM_PCIE_INTR_NONFATAL) {
> + dev_warn(dev, "Non fatal error message\n");
> + cpm_pcie_clear_err_interrupts(port);
> + }
> +
> + if (status & XILINX_CPM_PCIE_INTR_FATAL) {
> + dev_warn(dev, "Fatal error message\n");
> + cpm_pcie_clear_err_interrupts(port);
> + }
> +
> + if (status & XILINX_CPM_PCIE_INTR_INTX) {
> + /* Handle INTx Interrupt */
> + intr_val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN);
> + intr_val = intr_val >> XILINX_CPM_PCIE_IDRN_SHIFT;
> +
> + for_each_set_bit(bit, &intr_val, PCI_NUM_INTX)
> + generic_handle_irq(irq_find_mapping(port->leg_domain,
> + bit));
> + }
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_UNSUPP)
> + dev_warn(dev, "Slave unsupported request\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_UNEXP)
> + dev_warn(dev, "Slave unexpected completion\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_COMPL)
> + dev_warn(dev, "Slave completion timeout\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_ERRP)
> + dev_warn(dev, "Slave Error Poison\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_CMPABT)
> + dev_warn(dev, "Slave Completer Abort\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_ILLBUR)
> + dev_warn(dev, "Slave Illegal Burst\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_MST_DECERR)
> + dev_warn(dev, "Master decode error\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_MST_SLVERR)
> + dev_warn(dev, "Master slave error\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT)
> + dev_warn(dev, "PCIe ECAM access timeout\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_CFG_ERR_POISON)
> + dev_warn(dev, "ECAM poisoned completion received\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD)
> + dev_warn(dev, "PME_TO_ACK message received\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_PM_PME_RCVD)
> + dev_warn(dev, "PM_PME message received\n");
> +
> + if (status & XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT)
> + dev_warn(dev, "PCIe completion timeout received\n");
> +
> + /* Clear the Interrupt Decode register */
> + pcie_write(port, status, XILINX_CPM_PCIE_REG_IDR);
> +
> + /*
> + * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
> + * CPM SLCR block.
> + */
> + val = readl(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> + if (val)
> + writel(val, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> +
> + return IRQ_HANDLED;
> +}
> +
> +/**
> + * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
> + * @port: PCIe port information
> + *
> + * Return: '0' on success and error value on failure
> + */
> +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
> +{
> + struct device *dev = port->dev;
> + struct device_node *node = dev->of_node;
> + struct device_node *pcie_intc_node;
> +
> + /* Setup INTx */
> + pcie_intc_node = of_get_next_child(node, NULL);
> + if (!pcie_intc_node) {
> + dev_err(dev, "No PCIe Intc node found\n");
> + return -EINVAL;
> + }
> +
> + port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
> + &intx_domain_ops,
> + port);
> + if (!port->leg_domain) {
> + dev_err(dev, "Failed to get a INTx IRQ domain\n");
> + return -ENOMEM;
> + }
> +
> + return 0;
> +}
> +
> +/**
> + * xilinx_cpm_pcie_init_port - Initialize hardware
> + * @port: PCIe port information
> + */
> +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
> +{
> + if (cpm_pcie_link_up(port))
> + dev_info(port->dev, "PCIe Link is UP\n");
> + else
> + dev_info(port->dev, "PCIe Link is DOWN\n");
> +
> + /* Disable all interrupts */
> + pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
> + XILINX_CPM_PCIE_REG_IMR);
> +
> + /* Clear pending interrupts */
> + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
> + XILINX_CPM_PCIE_IMR_ALL_MASK,
> + XILINX_CPM_PCIE_REG_IDR);
> +
> + /* Enable all interrupts */
> + pcie_write(port, XILINX_CPM_PCIE_IMR_ALL_MASK,
> + XILINX_CPM_PCIE_REG_IMR);
> + pcie_write(port, XILINX_CPM_PCIE_IDRN_MASK,
> + XILINX_CPM_PCIE_REG_IDRN_MASK);
> +
> + /*
> + * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
> + * CPM SLCR block.
> + */
> + writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
> + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
> + /* Enable the Bridge enable bit */
> + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
> + XILINX_CPM_PCIE_REG_RPSC_BEN,
> + XILINX_CPM_PCIE_REG_RPSC);
> +}
> +
> +static int xilinx_cpm_request_misc_irq(struct xilinx_cpm_pcie_port *port)
> +{
> + struct device *dev = port->dev;
> + struct platform_device *pdev = to_platform_device(dev);
> + int err;
> +
> + port->irq_misc = platform_get_irq_byname(pdev, "misc");
> + if (port->irq_misc <= 0) {
> + dev_err(dev, "Unable to find misc IRQ line\n");
> + return port->irq_misc;
> + }
> + err = devm_request_irq(dev, port->irq_misc,
> + xilinx_cpm_pcie_intr_handler,
> + IRQF_SHARED | IRQF_NO_THREAD,
> + "xilinx-pcie", port);
Nope. See above.
> + if (err) {
> + dev_err(dev, "unable to request misc IRQ line %d\n",
> + port->irq_misc);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +/**
> + * xilinx_cpm_pcie_parse_dt - Parse Device tree
> + * @port: PCIe port information
> + *
> + * Return: '0' on success and error value on failure
> + */
> +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port)
> +{
> + struct device *dev = port->dev;
> + struct platform_device *pdev = to_platform_device(dev);
> + struct resource *res;
> + int err;
> +
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
> + port->reg_base = devm_ioremap_resource(dev, res);
> + if (IS_ERR(port->reg_base))
> + return PTR_ERR(port->reg_base);
> +
> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
> + "cpm_slcr");
> + port->cpm_base = devm_ioremap_resource(dev, res);
> + if (IS_ERR(port->cpm_base))
> + return PTR_ERR(port->cpm_base);
> +
> + err = xilinx_cpm_request_misc_irq(port);
> + if (err)
> + return err;
> +
> + return 0;
> +}
> +
> +/**
> + * xilinx_cpm_pcie_probe - Probe function
> + * @pdev: Platform device pointer
> + *
> + * Return: '0' on success and error value on failure
> + */
> +static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
> +{
> + struct xilinx_cpm_pcie_port *port;
> + struct device *dev = &pdev->dev;
> + struct pci_bus *bus;
> + struct pci_bus *child;
> + struct pci_host_bridge *bridge;
> + int err;
> +
> + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
> + if (!bridge)
> + return -ENODEV;
> +
> + port = pci_host_bridge_priv(bridge);
> +
> + port->dev = dev;
> +
> + err = xilinx_cpm_pcie_parse_dt(port);
> + if (err) {
> + dev_err(dev, "Parsing DT failed\n");
> + return err;
> + }
> +
> + xilinx_cpm_pcie_init_port(port);
> +
> + err = xilinx_cpm_pcie_init_irq_domain(port);
> + if (err) {
> + dev_err(dev, "Failed creating IRQ Domain\n");
> + return err;
> + }
If subsequent calls fail from this point onwards, the work carried
out in this initialization needs to be undone, which it currently
isn't; please add that error handling.
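For illustration only, one possible shape of that unwinding (a sketch under the
assumption that only the INTx domain created above needs tearing down; it is not
the required implementation):

	err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
					      &bridge->dma_ranges, NULL);
	if (err) {
		dev_err(dev, "Getting bridge resources failed\n");
		goto err_irq_domain;
	}

	/* ... remaining bridge setup and pci_scan_root_bus_bridge() ... */

	return 0;

err_irq_domain:
	irq_domain_remove(port->leg_domain);
	return err;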
Thanks,
Lorenzo
> +
> + err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
> + &bridge->dma_ranges, NULL);
> + if (err) {
> + dev_err(dev, "Getting bridge resources failed\n");
> + return err;
> + }
> +
> + bridge->dev.parent = dev;
> + bridge->sysdata = port;
> + bridge->busnr = port->root_busno;
> + bridge->ops = &xilinx_cpm_pcie_ops;
> + bridge->map_irq = of_irq_parse_and_map_pci;
> + bridge->swizzle_irq = pci_common_swizzle;
> +
> + err = pci_scan_root_bus_bridge(bridge);
> + if (err)
> + return err;
> +
> + bus = bridge->bus;
> +
> + pci_assign_unassigned_bus_resources(bus);
> + list_for_each_entry(child, &bus->children, node)
> + pcie_bus_configure_settings(child);
> + pci_bus_add_devices(bus);
> + return 0;
> +}
> +
> +static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
> + { .compatible = "xlnx,versal-cpm-host-1.00", },
> + {}
> +};
> +
> +static struct platform_driver xilinx_cpm_pcie_driver = {
> + .driver = {
> + .name = "xilinx-cpm-pcie",
> + .of_match_table = xilinx_cpm_pcie_of_match,
> + .suppress_bind_attrs = true,
> + },
> + .probe = xilinx_cpm_pcie_probe,
> +};
> +
> +builtin_platform_driver(xilinx_cpm_pcie_driver);
> --
> 2.7.4
>
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> On Thu, Jan 30, 2020 at 09:42:51PM +0530, Bharat Kumar Gogada wrote:
> > - Add support for Versal CPM as Root Port.
> > - The Versal ACAP devices include CCIX-PCIe Module (CPM). The integrated
> > block for CPM along with the integrated bridge can function
> > as PCIe Root Port.
> > - CPM Versal uses GICv3 ITS feature for achieving assigning MSI/MSI-X
> > vectors and handling MSI/MSI-X interrupts.
>
> This is not relevant information.
Thanks Lorenzo, will add better details.
>
> > - Bridge error and legacy interrupts in Versal CPM are handled using
> > Versal CPM specific MISC interrupt line.
> >
> > Changes v5:
> > - Removed xilinx_cpm_pcie_valid_device function
>
> Remove Changes log from the commit log.
Accepted, will remove.
>
> > Signed-off-by: Bharat Kumar Gogada <[email protected]>
> > ---
> > drivers/pci/controller/Kconfig | 8 +
> > drivers/pci/controller/Makefile | 1 +
> > drivers/pci/controller/pcie-xilinx-cpm.c | 491
> > +++++++++++++++++++++++++++++++
> > 3 files changed, 500 insertions(+)
> > create mode 100644 drivers/pci/controller/pcie-xilinx-cpm.c
> >
> > diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
> > index c77069c..362f4db 100644
> > --- a/drivers/pci/controller/Kconfig
> > +++ b/drivers/pci/controller/Kconfig
> > @@ -81,6 +81,14 @@ config PCIE_XILINX
> > Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
> > Host Bridge driver.
> >
> > +config PCIE_XILINX_CPM
> > + bool "Xilinx Versal CPM host bridge support"
> > + depends on ARCH_ZYNQMP || COMPILE_TEST
> > + help
> > + Say 'Y' here if you want kernel support for the
> > + Xilinx Versal CPM host bridge. The driver supports
> > + MSI/MSI-X interrupts using GICv3 ITS feature.
> > +
> > config PCI_XGENE
> > bool "X-Gene PCIe controller"
> > depends on ARM64 || COMPILE_TEST
> > diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
> > index 3d4f597..6c936e9 100644
> > --- a/drivers/pci/controller/Makefile
> > +++ b/drivers/pci/controller/Makefile
> > @@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
> > obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
> > obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
> > obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
> > +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
> > obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
> > obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
> > obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
> > diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
> > new file mode 100644
> > index 0000000..4e4c0f0
> > --- /dev/null
> > +++ b/drivers/pci/controller/pcie-xilinx-cpm.c
> > @@ -0,0 +1,491 @@
> > +// SPDX-License-Identifier: GPL-2.0+
> > +/*
> > + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
> > + *
> > + * (C) Copyright 2019 - 2020, Xilinx, Inc.
> > + */
> > +
> > +#include <linux/interrupt.h>
> > +#include <linux/irq.h>
> > +#include <linux/irqdomain.h>
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/of_address.h>
> > +#include <linux/of_pci.h>
> > +#include <linux/of_platform.h>
> > +#include <linux/of_irq.h>
> > +#include <linux/pci.h>
> > +#include <linux/platform_device.h>
> > +
> > +#include "../pci.h"
> > +
> > +/* Register definitions */
> > +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
> > +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
> > +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
> > +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
> > +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
> > +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
> > +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
> > +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
> > +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
> > +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
> > +
> > +/* Interrupt registers definitions */
> > +#define XILINX_CPM_PCIE_INTR_LINK_DOWN BIT(0)
> > +#define XILINX_CPM_PCIE_INTR_HOT_RESET BIT(3)
> > +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT BIT(8)
> > +#define XILINX_CPM_PCIE_INTR_CORRECTABLE BIT(9)
> > +#define XILINX_CPM_PCIE_INTR_NONFATAL BIT(10)
> > +#define XILINX_CPM_PCIE_INTR_FATAL BIT(11)
> > +#define XILINX_CPM_PCIE_INTR_INTX BIT(16)
> > +#define XILINX_CPM_PCIE_INTR_MSI BIT(17)
> > +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP BIT(20)
> > +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP BIT(21)
> > +#define XILINX_CPM_PCIE_INTR_SLV_COMPL BIT(22)
> > +#define XILINX_CPM_PCIE_INTR_SLV_ERRP BIT(23)
> > +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT BIT(24)
> > +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR BIT(25)
> > +#define XILINX_CPM_PCIE_INTR_MST_DECERR BIT(26)
> > +#define XILINX_CPM_PCIE_INTR_MST_SLVERR BIT(27)
> > +#define XILINX_CPM_PCIE_IMR_ALL_MASK 0x1FF39FF9
> > +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
> > +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
> > +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
> > +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON BIT(12)
> > +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
> > +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD BIT(17)
> > +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
> > +#define XILINX_CPM_PCIE_IDRN_SHIFT 16
> > +
> > +/* Root Port Error FIFO Read Register definitions */
> > +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
> > +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
> > +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
> > +
> > +/* Root Port Status/control Register definitions */
> > +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
> > +
> > +/* Phy Status/Control Register definitions */
> > +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
> > +
> > +/* ECAM definitions */
> > +#define ECAM_BUS_NUM_SHIFT 20
> > +#define ECAM_DEV_NUM_SHIFT 12
>
> You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
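> Something along these lines, for instance (untested sketch; "cfg" here would
> be the ECAM config window, see pci_ecam_create()):
>
>	bridge->sysdata = cfg;
>	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
>
> and then the custom map_bus()/pci_ops below can go away.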
Does this need a separate ranges region for the ECAM space?
We have the ECAM and controller space in the same region.
>
> > +
> > +/**
> > + * struct xilinx_cpm_pcie_port - PCIe port information
> > + * @reg_base: Bridge Register Base
> > + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
> > + * @irq: Interrupt number
> > + * @root_busno: Root Bus number
> > + * @dev: Device pointer
> > + * @leg_domain: Legacy IRQ domain pointer
> > + * @irq_misc: Legacy and error interrupt number
> > + */
> > +struct xilinx_cpm_pcie_port {
> > + void __iomem *reg_base;
> > + void __iomem *cpm_base;
> > + u32 irq;
> > + u8 root_busno;
> > + struct device *dev;
> > + struct irq_domain *leg_domain;
> > + int irq_misc;
> > +};
> > +
> > +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
> > +{
> > + return readl(port->reg_base + reg);
> > +}
> > +
> > +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> > + u32 val, u32 reg)
> > +{
> > + writel(val, port->reg_base + reg);
> > +}
> > +
> > +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
> > +{
> > + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> > + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
>
> u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);
>
> return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
>
> And this function call is not that informative anyway - it is used just to print a log
> whose usefulness is questionable.
We need this logging; customers use this information when debugging link-down failures.
>
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_clear_err_interrupts - Clear Error Interrupts
> > + * @port: PCIe port information
> > + */
> > +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
> > +{
> > + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
> > +
> > + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
> > + dev_dbg(port->dev, "Requester ID %lu\n",
> > + val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
> > + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_RPEFR);
> > + }
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_map_bus - Get configuration base
> > + * @bus: PCI Bus structure
> > + * @devfn: Device/function
> > + * @where: Offset from base
> > + *
> > + * Return: Base address of the configuration space needed to be
> > + * accessed.
> > + */
> > +static void __iomem *xilinx_cpm_pcie_map_bus(struct pci_bus *bus,
> > +                                             unsigned int devfn, int where)
> > +{
> > + struct xilinx_cpm_pcie_port *port = bus->sysdata;
> > + int relbus;
> > +
> > + relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
> > + (devfn << ECAM_DEV_NUM_SHIFT);
> > +
> > + return port->reg_base + relbus + where;
> > +}
>
> You don't need this function, you can rely on pci_generic_ecam_ops.
Added query above.
>
> > +/* PCIe operations */
> > +static struct pci_ops xilinx_cpm_pcie_ops = {
> > + .map_bus = xilinx_cpm_pcie_map_bus,
> > + .read = pci_generic_config_read,
> > + .write = pci_generic_config_write,
> > +};
>
> See above.
>
> > +
> > +/**
> > + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
> > + * @domain: IRQ domain
> > + * @irq: Virtual IRQ number
> > + * @hwirq: HW interrupt number
> > + *
> > + * Return: Always returns 0.
> > + */
> > +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> > +                                    unsigned int irq, irq_hw_number_t hwirq)
> > +{
> > + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
>
> INTX are level IRQs, the flow handler must be handle_level_irq.
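> i.e. something like:
>
>	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);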
Accepted, will change.
>
> > + irq_set_chip_data(irq, domain->host_data);
> > + irq_set_status_flags(irq, IRQ_LEVEL);
>
> The way INTX are handled in this patch is wrong. You must set-up a chained IRQ
> with the appropriate flow handler, current code uses an IRQ action and that's an
> IRQ layer violation and it goes without saying that it is almost certainly broken.
In our controller the same IRQ line is used for controller errors and legacy interrupts.
We have two cases here: error interrupts are self-consumed by the controller,
and legacy interrupts are flow handled. It is not INTx handling alone on this IRQ line.
So can a chained IRQ be used for self-consumed interrupts too?
>
> Please read drivers/pci/controller/pci-ft100.c code, that's the way INTX must be
> handled; I planned to take that code and make it a library, please use the same
> IRQ domain set-up.
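> To illustrate the chained set-up I mean, roughly (untested sketch, names made
> up; it deliberately ignores the error interrupts sharing the line, see above):
>
>	static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
>	{
>		struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
>		struct irq_chip *chip = irq_desc_get_chip(desc);
>		unsigned long val;
>		int i;
>
>		chained_irq_enter(chip, desc);
>		/* demultiplex the pending INTx bits from IDRN */
>		val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN) >>
>		      XILINX_CPM_PCIE_IDRN_SHIFT;
>		for_each_set_bit(i, &val, PCI_NUM_INTX)
>			generic_handle_irq(irq_find_mapping(port->leg_domain, i));
>		chained_irq_exit(chip, desc);
>	}
>
>	irq_set_chained_handler_and_data(port->irq_misc,
>					 xilinx_cpm_pcie_intx_flow, port);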
>
> > + return 0;
> > +}
> > +
> > +/* INTx IRQ Domain operations */
> > +static const struct irq_domain_ops intx_domain_ops = {
> > + .map = xilinx_cpm_pcie_intx_map,
> > + .xlate = pci_irqd_intx_xlate,
>
> This is wrong, wrong, wrong. There is no need for xlat'ing anything; see my reply
> on the DT bindings.
>
> > +};
> > +
> > +/**
> > + * xilinx_cpm_pcie_intr_handler - Interrupt Service Handler
> > + * @irq: IRQ number
> > + * @data: PCIe port information
> > + *
> > + * Return: IRQ_HANDLED on success and IRQ_NONE on failure
> > + */
> > +static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *data)
>
> Bad. This must be a chained IRQ flow handler, not an IRQ action.
Please check above query.
>
> > +{
> > + struct xilinx_cpm_pcie_port *port = data;
> > + struct device *dev = port->dev;
> > + u32 val, mask, status, bit;
> > + unsigned long intr_val;
> > +
> > + /* Read interrupt decode and mask registers */
> > + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
> > + mask = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
> > +
> > + status = val & mask;
> > + if (!status)
> > + return IRQ_NONE;
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_LINK_DOWN)
> > + dev_warn(dev, "Link Down\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_HOT_RESET)
> > + dev_info(dev, "Hot reset\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_TIMEOUT)
> > + dev_warn(dev, "ECAM access timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CORRECTABLE) {
> > + dev_warn(dev, "Correctable error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_NONFATAL) {
> > + dev_warn(dev, "Non fatal error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_FATAL) {
> > + dev_warn(dev, "Fatal error message\n");
> > + cpm_pcie_clear_err_interrupts(port);
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_INTX) {
> > + /* Handle INTx Interrupt */
> > + intr_val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN);
> > + intr_val = intr_val >> XILINX_CPM_PCIE_IDRN_SHIFT;
> > +
> > + for_each_set_bit(bit, &intr_val, PCI_NUM_INTX)
> > +   generic_handle_irq(irq_find_mapping(port->leg_domain, bit));
> > + }
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_UNSUPP)
> > + dev_warn(dev, "Slave unsupported request\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_UNEXP)
> > + dev_warn(dev, "Slave unexpected completion\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_COMPL)
> > + dev_warn(dev, "Slave completion timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_ERRP)
> > + dev_warn(dev, "Slave Error Poison\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_CMPABT)
> > + dev_warn(dev, "Slave Completer Abort\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_ILLBUR)
> > + dev_warn(dev, "Slave Illegal Burst\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_MST_DECERR)
> > + dev_warn(dev, "Master decode error\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_MST_SLVERR)
> > + dev_warn(dev, "Master slave error\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT)
> > + dev_warn(dev, "PCIe ECAM access timeout\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_CFG_ERR_POISON)
> > + dev_warn(dev, "ECAM poisoned completion received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD)
> > + dev_warn(dev, "PME_TO_ACK message received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_PM_PME_RCVD)
> > + dev_warn(dev, "PM_PME message received\n");
> > +
> > + if (status & XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT)
> > + dev_warn(dev, "PCIe completion timeout received\n");
> > +
> > + /* Clear the Interrupt Decode register */
> > + pcie_write(port, status, XILINX_CPM_PCIE_REG_IDR);
> > +
> > + /*
> > + * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
> > + * CPM SLCR block.
> > + */
> > + val = readl(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> > + if (val)
> > +  writel(val, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
> > +
> > + return IRQ_HANDLED;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
> > + * @port: PCIe port information
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct device_node *node = dev->of_node;
> > + struct device_node *pcie_intc_node;
> > +
> > + /* Setup INTx */
> > + pcie_intc_node = of_get_next_child(node, NULL);
> > + if (!pcie_intc_node) {
> > + dev_err(dev, "No PCIe Intc node found\n");
> > + return -EINVAL;
> > + }
> > +
> > + port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
> > +                                          &intx_domain_ops, port);
> > + if (!port->leg_domain) {
> > + dev_err(dev, "Failed to get a INTx IRQ domain\n");
> > + return -ENOMEM;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_init_port - Initialize hardware
> > + * @port: PCIe port information
> > + */
> > +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
> > +{
> > + if (cpm_pcie_link_up(port))
> > + dev_info(port->dev, "PCIe Link is UP\n");
> > + else
> > + dev_info(port->dev, "PCIe Link is DOWN\n");
> > +
> > + /* Disable all interrupts */
> > + pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IMR);
> > +
> > + /* Clear pending interrupts */
> > + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
> > + XILINX_CPM_PCIE_IMR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IDR);
> > +
> > + /* Enable all interrupts */
> > + pcie_write(port, XILINX_CPM_PCIE_IMR_ALL_MASK,
> > + XILINX_CPM_PCIE_REG_IMR);
> > + pcie_write(port, XILINX_CPM_PCIE_IDRN_MASK,
> > + XILINX_CPM_PCIE_REG_IDRN_MASK);
> > +
> > + /*
> > + * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
> > + * CPM SLCR block.
> > + */
> > + writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
> > + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
> > + /* Enable the Bridge enable bit */
> > + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
> > + XILINX_CPM_PCIE_REG_RPSC_BEN,
> > + XILINX_CPM_PCIE_REG_RPSC);
> > +}
> > +
> > +static int xilinx_cpm_request_misc_irq(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct platform_device *pdev = to_platform_device(dev);
> > + int err;
> > +
> > + port->irq_misc = platform_get_irq_byname(pdev, "misc");
> > + if (port->irq_misc <= 0) {
> > + dev_err(dev, "Unable to find misc IRQ line\n");
> > + return port->irq_misc;
> > + }
> > + err = devm_request_irq(dev, port->irq_misc,
> > + xilinx_cpm_pcie_intr_handler,
> > + IRQF_SHARED | IRQF_NO_THREAD,
> > + "xilinx-pcie", port);
>
> Nope. See above.
>
> > + if (err) {
> > + dev_err(dev, "unable to request misc IRQ line %d\n",
> > + port->irq_misc);
> > + return err;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_parse_dt - Parse Device tree
> > + * @port: PCIe port information
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port)
> > +{
> > + struct device *dev = port->dev;
> > + struct platform_device *pdev = to_platform_device(dev);
> > + struct resource *res;
> > + int err;
> > +
> > + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
> > + port->reg_base = devm_ioremap_resource(dev, res);
> > + if (IS_ERR(port->reg_base))
> > + return PTR_ERR(port->reg_base);
> > +
> > + res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
> > + "cpm_slcr");
> > + port->cpm_base = devm_ioremap_resource(dev, res);
> > + if (IS_ERR(port->cpm_base))
> > + return PTR_ERR(port->cpm_base);
> > +
> > + err = xilinx_cpm_request_misc_irq(port);
> > + if (err)
> > + return err;
> > +
> > + return 0;
> > +}
> > +
> > +/**
> > + * xilinx_cpm_pcie_probe - Probe function
> > + * @pdev: Platform device pointer
> > + *
> > + * Return: '0' on success and error value on failure
> > + */
> > +static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
> > +{
> > + struct xilinx_cpm_pcie_port *port;
> > + struct device *dev = &pdev->dev;
> > + struct pci_bus *bus;
> > + struct pci_bus *child;
> > + struct pci_host_bridge *bridge;
> > + int err;
> > +
> > + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
> > + if (!bridge)
> > + return -ENODEV;
> > +
> > + port = pci_host_bridge_priv(bridge);
> > +
> > + port->dev = dev;
> > +
> > + err = xilinx_cpm_pcie_parse_dt(port);
> > + if (err) {
> > + dev_err(dev, "Parsing DT failed\n");
> > + return err;
> > + }
> > +
> > + xilinx_cpm_pcie_init_port(port);
> > +
> > + err = xilinx_cpm_pcie_init_irq_domain(port);
> > + if (err) {
> > + dev_err(dev, "Failed creating IRQ Domain\n");
> > + return err;
> > + }
>
> If subsequent calls fail from this point onwards, there is work carried out in this
> initialization that needs to be undone, which currently isn't; please add it.
Accepted, will undo it in the failure path.
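Something along these lines (untested; "next_step" stands in for the remaining
probe calls):

	err = next_step();
	if (err)
		goto err_irq_domain;
	...
	return 0;

err_irq_domain:
	irq_domain_remove(port->leg_domain);
	return err;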
>
>
> > +
> > + err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
[+MarcZ, FHI]
On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
[...]
> > > +/* ECAM definitions */
> > > +#define ECAM_BUS_NUM_SHIFT 20
> > > +#define ECAM_DEV_NUM_SHIFT 12
> >
> > You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
> Does this need separate ranges region for ECAM space ?
> We have ECAM and controller space in same region.
You can create an ECAM window with pci_ecam_create where *cfgres
represents the ECAM area; I don't get what you mean by "same region".
Do you mean "contiguous"? Or something else?
> > > +
> > > +/**
> > > + * struct xilinx_cpm_pcie_port - PCIe port information
> > > + * @reg_base: Bridge Register Base
> > > + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
> > > + * @irq: Interrupt number
> > > + * @root_busno: Root Bus number
> > > + * @dev: Device pointer
> > > + * @leg_domain: Legacy IRQ domain pointer
> > > + * @irq_misc: Legacy and error interrupt number */ struct
> > > +xilinx_cpm_pcie_port {
> > > + void __iomem *reg_base;
> > > + void __iomem *cpm_base;
> > > + u32 irq;
> > > + u8 root_busno;
> > > + struct device *dev;
> > > + struct irq_domain *leg_domain;
> > > + int irq_misc;
> > > +};
> > > +
> > > +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32
> > > +reg) {
> > > + return readl(port->reg_base + reg);
> > > +}
> > > +
> > > +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> > > + u32 val, u32 reg)
> > > +{
> > > + writel(val, port->reg_base + reg);
> > > +}
> > > +
> > > +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port
> > > +*port) {
> > > + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> > > + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
> >
> > u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);
> >
> > return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
> >
> > And this function call is not that informative anyway - it is used just to print a log
> > whose usefulness is questionable.
> We need this logging information customers are using this info in case
> of link down failure.
Out of curiosity, to do what ?
[...]
> > > +/**
> > > + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark
> > > +IRQ as valid
> > > + * @domain: IRQ domain
> > > + * @irq: Virtual IRQ number
> > > + * @hwirq: HW interrupt number
> > > + *
> > > + * Return: Always returns 0.
> > > + */
> > > +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> > > + unsigned int irq, irq_hw_number_t hwirq) {
> > > + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
> >
> > INTX are level IRQs, the flow handler must be handle_level_irq.
> Accepted will change.
> >
> > > + irq_set_chip_data(irq, domain->host_data);
> > > + irq_set_status_flags(irq, IRQ_LEVEL);
> >
> > The way INTX are handled in this patch is wrong. You must set-up a chained IRQ
> > with the appropriate flow handler, current code uses an IRQ action and that's an
> > IRQ layer violation and it goes without saying that it is almost certainly broken.
> In our controller we use same irq line for controller errors and
> legacy errors. we have two cases here where error interrupts are
> self-consumed by controller, and legacy interrupts are flow handled.
> Its not INTX handling alone for this IRQ line . So chained IRQ can be
> used for self consumed interrupts too ?
No. In this specific case neither solution is satisfying; we need to
give it some thought. I will talk to Marc (CC'ed) to find the best
option here going forward.
Thanks,
Lorenzo
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> [+MarcZ, FHI]
>
> On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
>
> [...]
>
> > > > +/* ECAM definitions */
> > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > +#define ECAM_DEV_NUM_SHIFT 12
> > >
> > > You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
> > Does this need separate ranges region for ECAM space ?
> > We have ECAM and controller space in same region.
>
> You can create an ECAM window with pci_ecam_create where *cfgres
> represent the ECAM area, I don't get what you mean by "same region".
>
> Do you mean "contiguous" ? Or something else ?
Yes, contiguous; within the ECAM region some space is used for controller registers.
>
> > > > +
> > > > +/**
> > > > + * struct xilinx_cpm_pcie_port - PCIe port information
> > > > + * @reg_base: Bridge Register Base
> > > > + * @cpm_base: CPM System Level Control and Status Register(SLCR)
> > > > +Base
> > > > + * @irq: Interrupt number
> > > > + * @root_busno: Root Bus number
> > > > + * @dev: Device pointer
> > > > + * @leg_domain: Legacy IRQ domain pointer
> > > > + * @irq_misc: Legacy and error interrupt number */ struct
> > > > +xilinx_cpm_pcie_port {
> > > > + void __iomem *reg_base;
> > > > + void __iomem *cpm_base;
> > > > + u32 irq;
> > > > + u8 root_busno;
> > > > + struct device *dev;
> > > > + struct irq_domain *leg_domain;
> > > > + int irq_misc;
> > > > +};
> > > > +
> > > > +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port,
> > > > +u32
> > > > +reg) {
> > > > + return readl(port->reg_base + reg); }
> > > > +
> > > > +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> > > > + u32 val, u32 reg)
> > > > +{
> > > > + writel(val, port->reg_base + reg); }
> > > > +
> > > > +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port
> > > > +*port) {
> > > > + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> > > > + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
> > >
> > > u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);
> > >
> > > return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
> > >
> > > And this function call is not that informative anyway - it is used
> > > just to print a log whose usefulness is questionable.
> > We need this logging information customers are using this info in case
> > of link down failure.
>
> Out of curiosity, to do what ?
They use this information as first-level debug data and then raise a query with the Xilinx support team.
>
> [...]
>
> > > > +/**
> > > > + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and
> > > > +mark IRQ as valid
> > > > + * @domain: IRQ domain
> > > > + * @irq: Virtual IRQ number
> > > > + * @hwirq: HW interrupt number
> > > > + *
> > > > + * Return: Always returns 0.
> > > > + */
> > > > +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> > > > + unsigned int irq, irq_hw_number_t hwirq) {
> > > > + irq_set_chip_and_handler(irq, &dummy_irq_chip,
> > > > +handle_simple_irq);
> > >
> > > INTX are level IRQs, the flow handler must be handle_level_irq.
> > Accepted will change.
> > >
> > > > + irq_set_chip_data(irq, domain->host_data);
> > > > + irq_set_status_flags(irq, IRQ_LEVEL);
> > >
> > > The way INTX are handled in this patch is wrong. You must set-up a
> > > chained IRQ with the appropriate flow handler, current code uses an
> > > IRQ action and that's an IRQ layer violation and it goes without saying that it
> is almost certainly broken.
> > In our controller we use same irq line for controller errors and
> > legacy errors. we have two cases here where error interrupts are
> > self-consumed by controller, and legacy interrupts are flow handled.
> > Its not INTX handling alone for this IRQ line . So chained IRQ can be
> > used for self consumed interrupts too ?
>
> No. In this specific case both solutions are not satisfying, we need to give it
> some thought, I will talk to Marc (CC'ed) to find the best option here going
> forward.
>
Ok, will wait for Marc to provide inputs.
Regards,
Bharat
On Fri, Feb 28, 2020 at 12:48:48PM +0000, Bharat Kumar Gogada wrote:
> > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
> >
> > [+MarcZ, FHI]
> >
> > On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
> >
> > [...]
> >
> > > > > +/* ECAM definitions */
> > > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > > +#define ECAM_DEV_NUM_SHIFT 12
> > > >
> > > > You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
> > > Does this need separate ranges region for ECAM space ?
> > > We have ECAM and controller space in same region.
> >
> > You can create an ECAM window with pci_ecam_create where *cfgres
> > represent the ECAM area, I don't get what you mean by "same region".
> >
> > Do you mean "contiguous" ? Or something else ?
> Yes, contiguous; within ECAM region some space is for controller registers.
What does that mean ? I don't get it. Can you explain to me how this
address space works please ?
Thanks,
Lorenzo
> From: Lorenzo Pieralisi <[email protected]>
> Sent: Friday, March 6, 2020 4:46 PM
> To: Bharat Kumar Gogada <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; Ravikiran Gummaluri <[email protected]>;
> [email protected]
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> On Fri, Feb 28, 2020 at 12:48:48PM +0000, Bharat Kumar Gogada wrote:
> > > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root
> > > Port driver
> > >
> > > [+MarcZ, FHI]
> > >
> > > On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
> > >
> > > [...]
> > >
> > > > > > +/* ECAM definitions */
> > > > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > > > +#define ECAM_DEV_NUM_SHIFT 12
> > > > >
> > > > > You don't need these ECAM_* defines, you can use
> pci_generic_ecam_ops.
> > > > Does this need separate ranges region for ECAM space ?
> > > > We have ECAM and controller space in same region.
> > >
> > > You can create an ECAM window with pci_ecam_create where *cfgres
> > > represent the ECAM area, I don't get what you mean by "same region".
> > >
> > > Do you mean "contiguous" ? Or something else ?
> > Yes, contiguous; within ECAM region some space is for controller registers.
>
> What does that mean ? I don't get it. Can you explain to me how this address
> space works please ?
>
Hi Lorenzo,
reg = <0x6 0x00000000 0x0 0x1000000>,
<0x0 0xFCA10000 0x0 0x1000>;
reg-names = "cfg", "cpm_slcr";
In the above "cfg" region, part of the space is reserved for bridge registers and the rest is used
for ECAM address space transactions. The bridge registers are mapped at an unused offset in the
Root Port's config space; when that offset is hit, the access goes to the controller register space.
This region is already being mapped:
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
port->reg_base = devm_ioremap_resource(dev, res);
Will pci_ecam_create work along with the above API simultaneously?
Regards,
Bharat
On Fri, Mar 06, 2020 at 11:45:47AM +0000, Bharat Kumar Gogada wrote:
> > From: Lorenzo Pieralisi <[email protected]>
> > Sent: Friday, March 6, 2020 4:46 PM
> > To: Bharat Kumar Gogada <[email protected]>
> > Cc: [email protected]; [email protected];
> > [email protected]; Ravikiran Gummaluri <[email protected]>;
> > [email protected]
> > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
> >
> > On Fri, Feb 28, 2020 at 12:48:48PM +0000, Bharat Kumar Gogada wrote:
> > > > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root
> > > > Port driver
> > > >
> > > > [+MarcZ, FHI]
> > > >
> > > > On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
> > > >
> > > > [...]
> > > >
> > > > > > > +/* ECAM definitions */
> > > > > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > > > > +#define ECAM_DEV_NUM_SHIFT 12
> > > > > >
> > > > > > You don't need these ECAM_* defines, you can use
> > pci_generic_ecam_ops.
> > > > > Does this need separate ranges region for ECAM space ?
> > > > > We have ECAM and controller space in same region.
> > > >
> > > > You can create an ECAM window with pci_ecam_create where *cfgres
> > > > represent the ECAM area, I don't get what you mean by "same region".
> > > >
> > > > Do you mean "contiguous" ? Or something else ?
> > > Yes, contiguous; within ECAM region some space is for controller registers.
> >
> > What does that mean ? I don't get it. Can you explain to me how this address
> > space works please ?
> >
> Hi Lorenzo,
> reg = <0x6 0x00000000 0x0 0x1000000>,
This supports up to 16 buses (it is 16MB in size rather than the
full 256MB ECAM), right? Please make sure that the bus-range
property reflects that.
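For example, something like this (assuming you really decode 16 buses only):

	bus-range = <0x00 0x0f>;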
> <0x0 0xFCA10000 0x0 0x1000>;
> reg-names = "cfg", "cpm_slcr";
>
> In the above cfg region some region of it reserved for bridge registers and rest for ECAM
> address space transactions. The bridge registers are mapped at an unused offset in config space
> of root port, when the offset hit it will access controller register space.
>
> This region is already being mapped
> res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
> port->reg_base = devm_ioremap_resource(dev, res);
>
> Does pci_ecam_create will work along with above API simultaneously ?
Basically the bridge registers are accessible through the PCI
config accessors (after enumeration), since they are in the
bridge device specific config space (device specific area).
IIUC the answer is yes and you can access the bridge registers through
PCI config space accessors (after enumeration).
Pre-enumeration you can map (and unmap) the region as you are doing now
(+ the unmap) - since you need a pci_bus structure for PCI config
accessors to work and you don't have it till the enumeration is
actually executed.
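Roughly (untested sketch; "bus_range" is the bus resource parsed from DT):

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	cfg = pci_ecam_create(dev, res, bus_range, &pci_generic_ecam_ops);
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* cfg->win is the ECAM mapping */

with the "cpm_slcr" region kept as a separate devm_ioremap_resource() as you
have it now.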
Lorenzo
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> On Fri, Mar 06, 2020 at 11:45:47AM +0000, Bharat Kumar Gogada wrote:
> > > From: Lorenzo Pieralisi <[email protected]>
> > > Sent: Friday, March 6, 2020 4:46 PM
> > > To: Bharat Kumar Gogada <[email protected]>
> > > Cc: [email protected]; [email protected];
> > > [email protected]; Ravikiran Gummaluri <[email protected]>;
> > > [email protected]
> > > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root
> > > Port driver
> > >
> > > On Fri, Feb 28, 2020 at 12:48:48PM +0000, Bharat Kumar Gogada wrote:
> > > > > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root
> > > > > Port driver
> > > > >
> > > > > [+MarcZ, FHI]
> > > > >
> > > > > On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada
> wrote:
> > > > >
> > > > > [...]
> > > > >
> > > > > > > > +/* ECAM definitions */
> > > > > > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > > > > > +#define ECAM_DEV_NUM_SHIFT 12
> > > > > > >
> > > > > > > You don't need these ECAM_* defines, you can use
> > > pci_generic_ecam_ops.
> > > > > > Does this need separate ranges region for ECAM space ?
> > > > > > We have ECAM and controller space in same region.
> > > > >
> > > > > You can create an ECAM window with pci_ecam_create where *cfgres
> > > > > represent the ECAM area, I don't get what you mean by "same region".
> > > > >
> > > > > Do you mean "contiguous" ? Or something else ?
> > > > Yes, contiguous; within ECAM region some space is for controller registers.
> > >
> > > What does that mean ? I don't get it. Can you explain to me how this
> > > address space works please ?
> > >
> > Hi Lorenzo,
> > reg = <0x6 0x00000000 0x0 0x1000000>,
>
> This supports up to 16 busses (it is 16MB in size rather than full ECAM 256MB),
> right ? Please make sure that the bus-range property reflects that.
>
> > <0x0 0xFCA10000 0x0 0x1000>;
> > reg-names = "cfg", "cpm_slcr";
> >
> > In the above cfg region some region of it reserved for bridge
> > registers and rest for ECAM address space transactions. The bridge
> > registers are mapped at an unused offset in config space of root port, when
> the offset hit it will access controller register space.
> >
> > This region is already being mapped
> > res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
> > port->reg_base = devm_ioremap_resource(dev, res);
> >
> > Does pci_ecam_create will work along with above API simultaneously ?
>
> Basically the bridge registers are accessible through the PCI config accessors
> (after enumeration), since they are in the bridge device specific config space
> (device specific area).
>
> IIUC the answer is yes and you can access the bridge registers through PCI
> config space accessors (after enumeration).
>
Hi Lorenzo,
The bridge register accesses do not use the config space accessors; we have local
pcie_read/write APIs for accessing the bridge registers.
The hardware logic knows the offsets to which it will not send config accesses; it
performs local AXI reads and writes when the bridge registers are accessed.
Regards,
Bharat
> Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> [+MarcZ, FHI]
>
> On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
>
> [...]
>
> > > The way INTX are handled in this patch is wrong. You must set-up a
> > > chained IRQ with the appropriate flow handler, current code uses an
> > > IRQ action and that's an IRQ layer violation and it goes without saying that it
> is almost certainly broken.
> > In our controller we use same irq line for controller errors and
> > legacy errors. we have two cases here where error interrupts are
> > self-consumed by controller, and legacy interrupts are flow handled.
> > Its not INTX handling alone for this IRQ line . So chained IRQ can be
> > used for self consumed interrupts too ?
>
> No. In this specific case both solutions are not satisfying, we need to give it
> some thought, I will talk to Marc (CC'ed) to find the best option here going
> forward.
>
Hi Marc,
Can you please provide your inputs on this case?
Regards,
Bharat
> Subject: RE: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> > Subject: Re: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port
> > driver
> >
> > [+MarcZ, FHI]
> >
> > On Tue, Feb 25, 2020 at 02:39:56PM +0000, Bharat Kumar Gogada wrote:
> >
> > [...]
> >
> > > > > +/* ECAM definitions */
> > > > > +#define ECAM_BUS_NUM_SHIFT 20
> > > > > +#define ECAM_DEV_NUM_SHIFT 12
> > > >
> > > > You don't need these ECAM_* defines, you can use pci_generic_ecam_ops.
> > > Does this need separate ranges region for ECAM space ?
> > > We have ECAM and controller space in same region.
> >
> > You can create an ECAM window with pci_ecam_create where *cfgres
> > represent the ECAM area, I don't get what you mean by "same region".
> >
> > Do you mean "contiguous" ? Or something else ?
> >
> > > > > +
> > > > > +/**
> > > > > + * struct xilinx_cpm_pcie_port - PCIe port information
> > > > > + * @reg_base: Bridge Register Base
> > > > > + * @cpm_base: CPM System Level Control and Status
> > > > > +Register(SLCR) Base
> > > > > + * @irq: Interrupt number
> > > > > + * @root_busno: Root Bus number
> > > > > + * @dev: Device pointer
> > > > > + * @leg_domain: Legacy IRQ domain pointer
> > > > > + * @irq_misc: Legacy and error interrupt number */ struct
> > > > > +xilinx_cpm_pcie_port {
> > > > > + void __iomem *reg_base;
> > > > > + void __iomem *cpm_base;
> > > > > + u32 irq;
> > > > > + u8 root_busno;
> > > > > + struct device *dev;
> > > > > + struct irq_domain *leg_domain;
> > > > > + int irq_misc;
> > > > > +};
> > > > > +
> > > > > +static inline u32 pcie_read(struct xilinx_cpm_pcie_port *port,
> > > > > +u32
> > > > > +reg) {
> > > > > + return readl(port->reg_base + reg); }
> > > > > +
> > > > > +static inline void pcie_write(struct xilinx_cpm_pcie_port *port,
> > > > > + u32 val, u32 reg)
> > > > > +{
> > > > > + writel(val, port->reg_base + reg); }
> > > > > +
> > > > > +static inline bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port
> > > > > +*port) {
> > > > > + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
> > > > > + XILINX_CPM_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
> > > >
> > > > u32 val = pcie_read(port, XILINX_CPM_PCIE_REG_PSCR);
> > > >
> > > > return val & XILINX_CPM_PCIE_REG_PSCR_LNKUP;
> > > >
> > > > And this function call is not that informative anyway - it is used
> > > > just to print a log whose usefulness is questionable.
> > > We need this logging information customers are using this info in
> > > case of link down failure.
> >
> > Out of curiosity, to do what ?
> >
> > [...]
> >
> > > > > +/**
> > > > > + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and
> > > > > +mark IRQ as valid
> > > > > + * @domain: IRQ domain
> > > > > + * @irq: Virtual IRQ number
> > > > > + * @hwirq: HW interrupt number
> > > > > + *
> > > > > + * Return: Always returns 0.
> > > > > + */
> > > > > +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
> > > > > + unsigned int irq, irq_hw_number_t
> hwirq) {
> > > > > + irq_set_chip_and_handler(irq, &dummy_irq_chip,
> > > > > +handle_simple_irq);
> > > >
> > > > INTX are level IRQs, the flow handler must be handle_level_irq.
> > > Accepted will change.
> > > >
> > > > > + irq_set_chip_data(irq, domain->host_data);
> > > > > + irq_set_status_flags(irq, IRQ_LEVEL);
> > > >
> > > > The way INTX are handled in this patch is wrong. You must set-up a
> > > > chained IRQ with the appropriate flow handler, current code uses
> > > > an IRQ action and that's an IRQ layer violation and it goes
> > > > without saying that it
> > is almost certainly broken.
> > > In our controller we use same irq line for controller errors and
> > > legacy errors. we have two cases here where error interrupts are
> > > self-consumed by controller, and legacy interrupts are flow handled.
> > > Its not INTX handling alone for this IRQ line . So chained IRQ can
> > > be used for self consumed interrupts too ?
> >
> > No. In this specific case both solutions are not satisfying, we need
> > to give it some thought, I will talk to Marc (CC'ed) to find the best
> > option here going forward.
> >
> Hi Marc,
>
> Can you please provide yours inputs for this case.
>
Hi Marc,
Can you please provide the required inputs on this?
Regards,
Bharat
> Subject: RE: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
>
> > Subject: RE: [PATCH v5 2/2] PCI: xilinx-cpm: Add Versal CPM Root Port driver
> >
> > [...]
> >
> > > > > The way INTX are handled in this patch is wrong. You must set-up
> > > > > a chained IRQ with the appropriate flow handler, current code
> > > > > uses an IRQ action and that's an IRQ layer violation and it goes
> > > > > without saying that it
> > > is almost certainly broken.
> > > > In our controller we use same irq line for controller errors and
> > > > legacy errors. we have two cases here where error interrupts are
> > > > self-consumed by controller, and legacy interrupts are flow handled.
> > > > Its not INTX handling alone for this IRQ line . So chained IRQ
> > > > can be used for self consumed interrupts too ?
> > >
> > > No. In this specific case both solutions are not satisfying, we need
> > > to give it some thought, I will talk to Marc (CC'ed) to find the
> > > best option here going forward.
> > >
> > Hi Marc,
> >
> > Can you please provide yours inputs for this case.
> >
> Hi Marc,
>
> Can you please provide required inputs on this.
>
Hi Lorenzo,
Since Marc hasn't responded, do you have any inputs on this?
Shall I proceed with your other comments?
Regards,
Bharat
On Thu, Apr 16, 2020 at 07:07:28AM +0000, Bharat Kumar Gogada wrote:
> [...]
>
> > > > No. In this specific case both solutions are not satisfying, we need
> > > > to give it some thought, I will talk to Marc (CC'ed) to find the
> > > > best option here going forward.
> > > >
> > > Hi Marc,
> > >
> > > Can you please provide yours inputs for this case.
> > >
> > Hi Marc,
> >
> > Can you please provide required inputs on this.
> >
> HI Lorenzo,
>
> Since Marc hasn't responded, do you have any inputs on this ?
> Shall I proceed with other comments of yours ?
Yes, please update the patch with my comments - for the irqchip we need
to decide how to proceed further, but for the sake of making progress
please update the code and repost - no need to update the IRQ handling
code; we will decide what to do when everything else is in order.
Thanks,
Lorenzo