ZynqMP devices have a PCIe bridge along with a DMA engine in the PS.
These devices can be configured either as PCIe Endpoints
or as a PCIe Root Complex.
This patch series provides a driver to initiate
transactions using this DMA.
A single platform driver handles both Endpoint and
Root DMA transfers.
Patch 1 enables Root DMA register translation and
interrupts.
Patch 2 corrects style errors reported by checkpatch.
Patch 3 adds the PCIe driver for Endpoints.
Patch 4 adds the platform driver, which handles DMA transactions
for both PCIe Endpoints and Root DMA.
Patch 5 describes the device tree bindings for Root DMA.
Ravi Shankar Jonnalagadda (5):
PCI:xilinx-nwl: Enable Root DMA
PCI:xilinx-nwl: Correcting Styling checks
dmaengine: zynqmp_ps_pcie: Adding PS PCIe DMA driver
dmaengine: zynqmp_ps_pcie: Adding PS PCIe platform DMA driver
devicetree: zynqmp_ps_pcie: Devicetree binding for Root DMA
.../devicetree/bindings/dma/xilinx/ps-pcie-dma.txt | 67 +
drivers/dma/Kconfig | 12 +
drivers/dma/xilinx/Makefile | 2 +
drivers/dma/xilinx/ps_pcie.h | 44 +
drivers/dma/xilinx/ps_pcie_main.c | 200 ++
drivers/dma/xilinx/ps_pcie_platform.c | 3055 ++++++++++++++++++++
drivers/pci/host/pcie-xilinx-nwl.c | 23 +-
include/linux/dma/ps_pcie_dma.h | 69 +
8 files changed, 3468 insertions(+), 4 deletions(-)
create mode 100644 Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
create mode 100644 drivers/dma/xilinx/ps_pcie.h
create mode 100644 drivers/dma/xilinx/ps_pcie_main.c
create mode 100644 drivers/dma/xilinx/ps_pcie_platform.c
create mode 100644 include/linux/dma/ps_pcie_dma.h
--
2.7.4
Correcting style issues reported by the checkpatch script.
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
drivers/pci/host/pcie-xilinx-nwl.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 5766582..3c62e3d 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -506,15 +506,15 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
- domain->host_data, handle_simple_irq,
- NULL, NULL);
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
}
mutex_unlock(&msi->lock);
return 0;
}
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+ unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -767,7 +767,6 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
-
/* Disable all legacy interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -932,4 +931,5 @@ static struct platform_driver nwl_pcie_driver = {
},
.probe = nwl_pcie_probe,
};
+
builtin_platform_driver(nwl_pcie_driver);
--
2.7.4
Adding support for the ZynqMP PS PCIe EP driver.
Adding support for the ZynqMP PS PCIe Root DMA driver.
Modifying Kconfig and Makefile to add the support.
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
drivers/dma/Kconfig | 12 +++
drivers/dma/xilinx/Makefile | 2 +
drivers/dma/xilinx/ps_pcie.h | 44 +++++++++
drivers/dma/xilinx/ps_pcie_main.c | 200 ++++++++++++++++++++++++++++++++++++++
include/linux/dma/ps_pcie_dma.h | 69 +++++++++++++
5 files changed, 327 insertions(+)
create mode 100644 drivers/dma/xilinx/ps_pcie.h
create mode 100644 drivers/dma/xilinx/ps_pcie_main.c
create mode 100644 include/linux/dma/ps_pcie_dma.h
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fa8f9c0..e2fe4e5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -586,6 +586,18 @@ config XILINX_ZYNQMP_DMA
help
Enable support for Xilinx ZynqMP DMA controller.
+config XILINX_PS_PCIE_DMA
+ tristate "Xilinx PS PCIe DMA support"
+ depends on (PCI && X86_64 || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for the Xilinx PS PCIe DMA engine present
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
config ZX_DMA
tristate "ZTE ZX DMA support"
depends on ARCH_ZX || COMPILE_TEST
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 9e91f8f..04f6f99 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+ps_pcie_dma-objs := ps_pcie_main.o ps_pcie_platform.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA) += ps_pcie_dma.o
diff --git a/drivers/dma/xilinx/ps_pcie.h b/drivers/dma/xilinx/ps_pcie.h
new file mode 100644
index 0000000..351f051
--- /dev/null
+++ b/drivers/dma/xilinx/ps_pcie.h
@@ -0,0 +1,44 @@
+/*
+ * Xilinx PS PCIe DMA Engine platform header file
+ *
+ * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __XILINX_PS_PCIE_H
+#define __XILINX_PS_PCIE_H
+
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/dma/ps_pcie_dma.h>
+
+/**
+ * dma_platform_driver_register - This will be invoked by module init
+ *
+ * Return: returns status of platform_driver_register
+ */
+int dma_platform_driver_register(void);
+/**
+ * dma_platform_driver_unregister - This will be invoked by module exit
+ *
+ * Return: returns void after unregistering the platform driver
+ */
+void dma_platform_driver_unregister(void);
+
+#endif
diff --git a/drivers/dma/xilinx/ps_pcie_main.c b/drivers/dma/xilinx/ps_pcie_main.c
new file mode 100644
index 0000000..4ccd8ef
--- /dev/null
+++ b/drivers/dma/xilinx/ps_pcie_main.c
@@ -0,0 +1,200 @@
+/*
+ * XILINX PS PCIe driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices.
+ * This PCIe driver creates a platform device with specific platform
+ * info enabling creation of DMA device corresponding to the channel
+ * information provided in the properties
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "ps_pcie.h"
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma"
+
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void ps_pcie_dma_remove(struct pci_dev *pdev);
+
+static u32 channel_properties_pcie_axi[] = {
+ (u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static u32 channel_properties_axi_pcie[] = {
+ (u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static struct property_entry generic_pcie_ep_property[] = {
+ PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
+ channel_properties_axi_pcie),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
+ channel_properties_axi_pcie),
+ { },
+};
+
+static const struct platform_device_info xlnx_std_platform_dev_info = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .properties = generic_pcie_ep_property,
+};
+
+/**
+ * ps_pcie_dma_probe - Driver probe function
+ * @pdev: Pointer to the pci_dev structure
+ * @ent: pci device id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+ struct platform_device *platform_dev;
+ struct platform_device_info platform_dev_info;
+
+ dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* For Root DMA platform device will be created through device tree */
+ if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
+ pdev->device == ZYNQMP_RC_DMA_DEVID)
+ return 0;
+
+ memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
+ sizeof(xlnx_std_platform_dev_info));
+
+ /* Do device specific channel configuration changes to
+ * platform_dev_info.properties if required
+ * More information on channel properties can be found
+ * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
+ */
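+	/*
+	 * Illustrative sketch only (an assumption, not used by any device
+	 * handled here): a card exposing just two channels could point the
+	 * platform data at its own property table before registering, e.g.
+	 *
+	 *	static const struct property_entry my_card_props[] = {
+	 *		PROPERTY_ENTRY_U32("numchannels", 2),
+	 *		PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
+	 *					 channel_properties_pcie_axi),
+	 *		PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
+	 *					 channel_properties_axi_pcie),
+	 *		{ },
+	 *	};
+	 *	platform_dev_info.properties = my_card_props;
+	 *
+	 * "my_card_props" is a hypothetical name used only for illustration.
+	 */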
+
+ platform_dev_info.parent = &pdev->dev;
+ platform_dev_info.data = &pdev;
+ platform_dev_info.size_data = sizeof(struct pci_dev **);
+
+ platform_dev = platform_device_register_full(&platform_dev_info);
+ if (IS_ERR(platform_dev)) {
+ dev_err(&pdev->dev,
+ "Cannot create platform device, aborting\n");
+ return PTR_ERR(platform_dev);
+ }
+
+ pci_set_drvdata(pdev, platform_dev);
+
+ dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
+
+ return 0;
+}
+
+static struct pci_device_id ps_pcie_dma_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
+ { }
+};
+
+static struct pci_driver ps_pcie_dma_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ps_pcie_dma_tbl,
+ .probe = ps_pcie_dma_probe,
+ .remove = ps_pcie_dma_remove,
+};
+
+/**
+ * ps_pcie_init - Driver init function
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_init(void)
+{
+ int ret;
+
+ pr_info("%s init()\n", DRV_MODULE_NAME);
+
+ ret = pci_register_driver(&ps_pcie_dma_driver);
+ if (ret)
+ return ret;
+
+ ret = dma_platform_driver_register();
+ if (ret)
+ pci_unregister_driver(&ps_pcie_dma_driver);
+
+ return ret;
+}
+
+/**
+ * ps_pcie_dma_remove - Driver remove function
+ * @pdev: Pointer to the pci_dev structure
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_remove(struct pci_dev *pdev)
+{
+ struct platform_device *platform_dev;
+
+ platform_dev = (struct platform_device *)pci_get_drvdata(pdev);
+
+ if (platform_dev)
+ platform_device_unregister(platform_dev);
+}
+
+/**
+ * ps_pcie_exit - Driver exit function
+ *
+ * Return: void
+ */
+static void __exit ps_pcie_exit(void)
+{
+ pr_info("%s exit()\n", DRV_MODULE_NAME);
+
+ dma_platform_driver_unregister();
+ pci_unregister_driver(&ps_pcie_dma_driver);
+}
+
+module_init(ps_pcie_init);
+module_exit(ps_pcie_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/dma/ps_pcie_dma.h b/include/linux/dma/ps_pcie_dma.h
new file mode 100644
index 0000000..d11323a
--- /dev/null
+++ b/include/linux/dma/ps_pcie_dma.h
@@ -0,0 +1,69 @@
+/*
+ * Xilinx PS PCIe DMA Engine support header file
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __DMA_XILINX_PS_PCIE_H
+#define __DMA_XILINX_PS_PCIE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
+
+#define ZYNQMP_DMA_DEVID (0xD024)
+#define ZYNQMP_RC_DMA_DEVID (0xD021)
+
+#define MAX_ALLOWED_CHANNELS_IN_HW 4
+
+#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
+
+#define DEFAULT_DMA_QUEUES 4
+#define TWO_DMA_QUEUES 2
+
+#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
+#define MAX_DESCRIPTORS 65536
+
+#define CHANNEL_COAELSE_COUNT 0
+
+#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milliseconds */
+
+#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
+#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
+
+/**
+ * struct BAR_PARAMS - PCIe Bar Parameters
+ * @BAR_PHYS_ADDR: PCIe BAR Physical address
+ * @BAR_LENGTH: Length of PCIe BAR
+ * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
+ */
+struct BAR_PARAMS {
+ dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
+ unsigned long BAR_LENGTH; /**< Length of BAR memory window */
+ void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
+};
+
+/**
+ * struct ps_pcie_dma_channel_match - Match structure for dma clients
+ * @pci_vendorid: PCIe Vendor id of PS PCIe DMA device
+ * @pci_deviceid: PCIe Device id of PS PCIe DMA device
+ * @board_number: Unique id to identify individual device in a system
+ * @channel_number: Unique channel number of the device
+ * @direction: DMA channel direction
+ * @bar_params: Pointer to BAR_PARAMS for accessing application specific data
+ */
+struct ps_pcie_dma_channel_match {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ u16 channel_number;
+ enum dma_data_direction direction;
+ struct BAR_PARAMS *bar_params;
+};
+
+#endif
--
2.7.4
Binding explaining devicetree usage for enabling Root DMA capability
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
.../devicetree/bindings/dma/xilinx/ps-pcie-dma.txt | 67 ++++++++++++++++++++++
1 file changed, 67 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
diff --git a/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
new file mode 100644
index 0000000..1522a49
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
@@ -0,0 +1,67 @@
+* Xilinx PS PCIe Root DMA
+
+Required properties:
+- compatible: Should be "xlnx,ps_pcie_dma-1.00.a"
+- reg: Register offset for Root DMA channels
+- reg-names: Name for the register. Should be "xlnx,ps_pcie_regbase"
+- interrupts: Interrupt pin for Root DMA
+- interrupt-names: Name for the interrupt. Should be "xlnx,ps_pcie_rootdma_intr"
+- interrupt-parent: Should be the GIC in case of ZynqMP
+- xlnx,rootdma: Indicates that this platform device is Root DMA.
+ This is required because the same platform driver is also invoked by PCIe Endpoints
+- xlnx,dma_vendorid: 16 bit PCIe device vendor id.
+ This can later be used by a DMA client for matching while using dma_request_channel
+- xlnx,dma_deviceid: 16 bit PCIe device id.
+ This can later be used by a DMA client for matching while using dma_request_channel
+- xlnx,numchannels: Indicates the number of channels to be enabled for the device.
+ Valid values are 1 to 4 for ZynqMP
+- xlnx,ps_pcie_channel<n>: One entry for each channel to be enabled.
+ This array contains channel specific properties.
+ Index 0: Direction of channel
+ The direction of a channel can be either PCIe memory to AXI memory, i.e., Host to Card, or
+ AXI memory to PCIe memory, i.e., Card to Host
+ PCIe to AXI channel direction is represented as 0x1
+ AXI to PCIe channel direction is represented as 0x0
+ Index 1: Number of Buffer Descriptors
+ This number describes the number of buffer descriptors to be allocated for a channel
+ Index 2: Number of Queues
+ Each channel has four DMA buffer descriptor queues.
+ By default all four queues are managed by the Root DMA driver.
+ The user may choose to have only two queues, either the Source queue and its Status queue or
+ the Destination queue and its Status queue, handled by the driver.
+ The other two queues then need to be handled by user logic, which is not part of this driver.
+ All four queues on the host are represented by 0x4
+ Two queues on the host are represented by 0x2
+ Index 3: Coalesce Count
+ This number indicates the number of transfers after which an interrupt needs to
+ be raised for the particular channel. The allowed range is from 0 to 255
+ Index 4: Coalesce Count Timer frequency
+ This property controls the frequency of the poll timer. A poll timer is
+ created for a channel whenever a coalesce count value (>= 1) is programmed for that
+ channel. This timer helps drain completed transactions even when an interrupt is
+ not generated.
+ For example, a channel entry of <0x1 0x7CF 0x4 0x0 0x3E8>, as used in the node below,
+ describes a PCIe to AXI channel with 1999 buffer descriptors, all four queues managed
+ by the driver, a coalesce count of 0 and a 1000 millisecond poll timer.
+
+Client Usage:
+ DMA clients can request these channels using the dma_request_channel API.
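+
+ A minimal client sketch is shown below for illustration only. The filter
+ callback name "my_dma_filter" is hypothetical (its implementation is client
+ specific), and the capability mask shown is an assumption, not something
+ mandated by this binding. The match structure is defined in
+ include/linux/dma/ps_pcie_dma.h.
+
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	struct ps_pcie_dma_channel_match match = {
+		.pci_vendorid   = 0x10EE,
+		.pci_deviceid   = 0xD021,
+		.channel_number = 0,
+		.direction      = DMA_TO_DEVICE,
+	};
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	chan = dma_request_channel(mask, my_dma_filter, &match);
+	/* On success, use the standard dmaengine prep/submit/issue APIs and
+	 * release the channel with dma_release_channel() when done.
+	 */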
+
+
+Xilinx PS PCIe Root DMA node Example
+++++++++++++++++++++++++++++++++++++
+
+ pci_rootdma: rootdma@fd0f0000 {
+ compatible = "xlnx,ps_pcie_dma-1.00.a";
+ reg = <0x0 0xfd0f0000 0x0 0x1000>;
+ reg-names = "xlnx,ps_pcie_regbase";
+ interrupts = <0 117 4>;
+ interrupt-names = "xlnx,ps_pcie_rootdma_intr";
+ interrupt-parent = <&gic>;
+ xlnx,rootdma;
+ xlnx,dma_vendorid = /bits/ 16 <0x10EE>;
+ xlnx,dma_deviceid = /bits/ 16 <0xD021>;
+ xlnx,numchannels = <0x4>;
+ #size-cells = <0x5>;
+ xlnx,ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ xlnx,ps_pcie_channel1 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ xlnx,ps_pcie_channel2 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ xlnx,ps_pcie_channel3 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ };
--
2.7.4
Platform driver handles transactions for PCIe EP DMA and Root DMA
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
drivers/dma/xilinx/ps_pcie_platform.c | 3055 +++++++++++++++++++++++++++++++++
1 file changed, 3055 insertions(+)
create mode 100644 drivers/dma/xilinx/ps_pcie_platform.c
diff --git a/drivers/dma/xilinx/ps_pcie_platform.c b/drivers/dma/xilinx/ps_pcie_platform.c
new file mode 100644
index 0000000..79f324a
--- /dev/null
+++ b/drivers/dma/xilinx/ps_pcie_platform.c
@@ -0,0 +1,3055 @@
+/*
+ * XILINX PS PCIe DMA driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "ps_pcie.h"
+#include "../dmaengine.h"
+
+#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
+#define MAX_BARS 6
+
+#define DMA_BAR_NUMBER 0
+
+#define MIN_SW_INTR_TRANSACTIONS 2
+
+#define CHANNEL_PROPERTY_LENGTH 50
+#define WORKQ_NAME_SIZE 100
+#define INTR_HANDLR_NAME_SIZE 100
+
+#define PS_PCIE_DMA_IRQ_NOSHARE 0
+
+#define MAX_COALESCE_COUNT 255
+
+#define DMA_CHANNEL_REGS_SIZE 0x80
+
+#define DMA_SRCQPTRLO_REG_OFFSET (0x00) /* Source Q pointer Lo */
+#define DMA_SRCQPTRHI_REG_OFFSET (0x04) /* Source Q pointer Hi */
+#define DMA_SRCQSZ_REG_OFFSET (0x08) /* Source Q size */
+#define DMA_SRCQLMT_REG_OFFSET (0x0C) /* Source Q limit */
+#define DMA_DSTQPTRLO_REG_OFFSET (0x10) /* Destination Q pointer Lo */
+#define DMA_DSTQPTRHI_REG_OFFSET (0x14) /* Destination Q pointer Hi */
+#define DMA_DSTQSZ_REG_OFFSET (0x18) /* Destination Q size */
+#define DMA_DSTQLMT_REG_OFFSET (0x1C) /* Destination Q limit */
+#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
+#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
+#define DMA_SSTAQSZ_REG_OFFSET (0x28) /* Source Status Q size */
+#define DMA_SSTAQLMT_REG_OFFSET (0x2C) /* Source Status Q limit */
+#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
+#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
+#define DMA_DSTAQSZ_REG_OFFSET (0x38) /* Destination Status Q size */
+#define DMA_DSTAQLMT_REG_OFFSET (0x3C) /* Destination Status Q limit */
+#define DMA_SRCQNXT_REG_OFFSET (0x40) /* Source Q next */
+#define DMA_DSTQNXT_REG_OFFSET (0x44) /* Destination Q next */
+#define DMA_SSTAQNXT_REG_OFFSET (0x48) /* Source Status Q next */
+#define DMA_DSTAQNXT_REG_OFFSET (0x4C) /* Destination Status Q next */
+#define DMA_SCRATCH0_REG_OFFSET (0x50) /* Scratch pad register 0 */
+
+#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60) /* DMA PCIe intr control reg */
+#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
+#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68) /* DMA AXI intr control reg */
+#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C) /* DMA AXI intr status reg */
+#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70) /* PCIe intr assert reg */
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74) /* AXI intr assert register */
+#define DMA_CNTRL_REG_OFFSET (0x78) /* DMA control register */
+#define DMA_STATUS_REG_OFFSET (0x7C) /* DMA status register */
+
+#define DMA_CNTRL_RST_BIT BIT(1)
+#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
+#define DMA_CNTRL_ENABL_BIT BIT(0)
+#define DMA_STATUS_DMA_PRES_BIT BIT(15)
+#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
+#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
+#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
+#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
+#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
+#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
+#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
+#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
+#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
+#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
+#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
+#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
+#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
+#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
+#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
+
+#define STA_BD_COMPLETED_BIT BIT(0)
+#define STA_BD_SOURCE_ERROR_BIT BIT(1)
+#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
+#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
+#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
+#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
+
+#define STA_BD_BYTE_COUNT_SHIFT 4
+
+#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
+
+#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
+
+#define MAX_TRANSFER_LENGTH 0x1000000
+
+#define AXI_ATTRIBUTE 0x3
+#define PCI_ATTRIBUTE 0x2
+
+#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
+
+/*
+ * User Id programmed into Source Q will be copied into Status Q of Destination
+ */
+#define DEFAULT_UID 1
+
+/*
+ * DMA channel registers
+ */
+struct DMA_ENGINE_REGISTERS {
+ u32 src_q_low; /* 0x00 */
+ u32 src_q_high; /* 0x04 */
+ u32 src_q_size; /* 0x08 */
+ u32 src_q_limit; /* 0x0C */
+ u32 dst_q_low; /* 0x10 */
+ u32 dst_q_high; /* 0x14 */
+ u32 dst_q_size; /* 0x18 */
+ u32 dst_q_limit; /* 0x1c */
+ u32 stas_q_low; /* 0x20 */
+ u32 stas_q_high; /* 0x24 */
+ u32 stas_q_size; /* 0x28 */
+ u32 stas_q_limit; /* 0x2C */
+ u32 stad_q_low; /* 0x30 */
+ u32 stad_q_high; /* 0x34 */
+ u32 stad_q_size; /* 0x38 */
+ u32 stad_q_limit; /* 0x3C */
+ u32 src_q_next; /* 0x40 */
+ u32 dst_q_next; /* 0x44 */
+ u32 stas_q_next; /* 0x48 */
+ u32 stad_q_next; /* 0x4C */
+ u32 scrathc0; /* 0x50 */
+ u32 scrathc1; /* 0x54 */
+ u32 scrathc2; /* 0x58 */
+ u32 scrathc3; /* 0x5C */
+ u32 pcie_intr_cntrl; /* 0x60 */
+ u32 pcie_intr_status; /* 0x64 */
+ u32 axi_intr_cntrl; /* 0x68 */
+ u32 axi_intr_status; /* 0x6C */
+ u32 pcie_intr_assert; /* 0x70 */
+ u32 axi_intr_assert; /* 0x74 */
+ u32 dma_channel_ctrl; /* 0x78 */
+ u32 dma_channel_status; /* 0x7C */
+} __attribute__((__packed__));
+
+/**
+ * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @user_id: User id gets copied to status q of destination
+ */
+struct SOURCE_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+/**
+ * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @reserved: Reserved field
+ */
+struct DEST_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 reserved;
+} __attribute__((__packed__));
+
+/**
+ * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
+ * @status_flag_byte_count: Byte count/buffer length and status flags
+ * @user_handle: User handle gets copied from src/dstq on completion
+ * @user_id: User id gets copied from srcq
+ */
+struct STATUS_DMA_DESCRIPTOR {
+ u32 status_flag_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+enum PACKET_CONTEXT_AVAILABILITY {
+ FREE = 0, /*Packet transfer Parameter context is free.*/
+ IN_USE /*Packet transfer Parameter context is in use.*/
+};
+
+struct ps_pcie_transfer_elements {
+ struct scatterlist *src_sgl;
+ unsigned int srcq_num_elemets;
+ struct scatterlist *dst_sgl;
+ unsigned int dstq_num_elemets;
+};
+
+struct ps_pcie_tx_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ struct ps_pcie_transfer_elements tx_elements;
+};
+
+struct ps_pcie_intr_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_intr_tx;
+};
+
+/*
+ * The context structure stored for each DMA transaction
+ * This structure is maintained separately for Src Q and Destination Q
+ * @availability_status: Indicates whether packet context is available
+ * @idx_sop: Indicates starting index of buffer descriptor for a transfer
+ * @idx_eop: Indicates ending index of buffer descriptor for a transfer
+ * @sgl: Indicates either src or dst sglist for the transaction
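+ * @seg: Tx segment to which this packet context belongs
+ * @requested_bytes: Number of bytes requested for the associated transfer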
+ */
+struct PACKET_TRANSFER_PARAMS {
+ enum PACKET_CONTEXT_AVAILABILITY availability_status;
+ u16 idx_sop;
+ u16 idx_eop;
+ struct scatterlist *sgl;
+ struct ps_pcie_tx_segment *seg;
+ u32 requested_bytes;
+};
+
+enum CHANNEL_STATE {
+ CHANNEL_RESOURCE_UNALLOCATED = 0, /* Channel resources not allocated */
+ CHANNEL_UNAVIALBLE, /* Channel inactive */
+ CHANNEL_AVAILABLE, /* Channel available for transfers */
+ CHANNEL_ERROR /* Channel encountered errors */
+};
+
+enum BUFFER_LOCATION {
+ BUFFER_LOC_PCI = 0,
+ BUFFER_LOC_AXI,
+ BUFFER_LOC_INVALID
+};
+
+enum dev_channel_properties {
+ DMA_CHANNEL_DIRECTION = 0,
+ NUM_DESCRIPTORS,
+ NUM_QUEUES,
+ COALESE_COUNT,
+ POLL_TIMER_FREQUENCY
+};
+
+/*
+ * struct ps_pcie_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @dev: The dma device
+ * @common: DMA common channel
+ * @chan_base: Pointer to Channel registers
+ * @channel_number: DMA channel number in the device
+ * @num_queues: Number of queues per channel.
+ * It should be four for memory mapped case and
+ * two for Streaming case
+ * @direction: Transfer direction
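+ * @srcq_buffer_location: Location (PCI or AXI) of buffers described in Src Q
+ * @dstq_buffer_location: Location (PCI or AXI) of buffers described in Dst Q
+ * @total_descriptors: Number of buffer descriptors allocated per queue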
+ * @state: Indicates channel state
+ * @channel_lock: Spin lock to be used before changing channel state
+ * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
+ * @coalesce_count: Indicates number of packet transfers before interrupts
+ * @poll_timer_freq:Indicates frequency of polling for completed transactions
+ * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
+ * @src_avail_descriptors: Available sgl source descriptors
+ * @src_desc_lock: Lock for synchronizing src_avail_descriptors
+ * @dst_avail_descriptors: Available sgl destination descriptors
+ * @dst_desc_lock: Lock for synchronizing
+ * dst_avail_descriptors
+ * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
+ * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
+ * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
+ * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
+ * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
+ * @dst_sgl_freeidx: Holds index of Destination SGL
+ * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
+ * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
+ * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
+ * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
+ * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
+ * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
+ * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
+ * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
+ * @read_attribute: Describes the attributes of buffer in srcq
+ * @write_attribute: Describes the attributes of buffer in dstq
+ * @intr_status_offset: Register offset to be checked on receiving interrupt
+ * @intr_control_offset: Register offset to be used to control interrupts
+ * @ppkt_ctx_srcq: Virtual address of packet context to Src Q updates
+ * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
+ * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
+ * @ppkt_ctx_dstq: Virtual address of packet context to Dst Q updates
+ * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
+ * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
+ * @pending_list_lock: Lock to be taken before updating pending transfers list
+ * @pending_list: List of transactions submitted to channel
+ * @active_list_lock: Lock to be taken before transferring transactions from
+ * pending list to active list which will be subsequently
+ * submitted to hardware
+ * @active_list: List of transactions that will be submitted to hardware
+ * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
+ * @pending_interrupts_list: List of interrupt transactions submitted to channel
+ * @active_interrupts_lock: Lock to be taken before transferring transactions
+ * from pending interrupt list to active interrupt list
+ * @active_interrupts_list: List of interrupt transactions that are active
+ * @transactions_pool: Mem pool to allocate dma transactions quickly
+ * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
+ * @sw_intrs_wrkq: Work Q which performs handling of software intrs
+ * @handle_sw_intrs:Work function handling software interrupts
+ * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
+ * @handle_chan_reset: Work that invokes channel reset function
+ * @handle_chan_shutdown: Work that invokes channel shutdown function
+ * @handle_chan_terminate: Work that invokes channel transactions termination
+ * @chan_shutdown_complt: Completion variable which says shutdown is done
+ * @chan_terminate_complete: Completion variable which says terminate is done
+ * @primary_desc_cleanup: Work Q which performs work related to sgl handling
+ * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
+ * and programming
+ * @chan_programming: Work Q which performs work related to channel programming
+ * @handle_chan_programming: Work that invokes channel programming function
+ * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
+ * @handle_srcq_desc_cleanup: Work function handling Src Q completions
+ * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
+ * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
+ * @srcq_work_complete: Src Q Work completion variable for primary work
+ * @dstq_work_complete: Dst Q Work completion variable for primary work
+ */
+struct ps_pcie_dma_chan {
+ struct xlnx_pcie_dma_device *xdev;
+ struct device *dev;
+
+ struct dma_chan common;
+
+ struct DMA_ENGINE_REGISTERS *chan_base;
+ u16 channel_number;
+
+ u32 num_queues;
+ enum dma_data_direction direction;
+ enum BUFFER_LOCATION srcq_buffer_location;
+ enum BUFFER_LOCATION dstq_buffer_location;
+
+ u32 total_descriptors;
+
+ enum CHANNEL_STATE state;
+ spinlock_t channel_lock; /* For changing channel state */
+
+ spinlock_t cookie_lock; /* For acquiring cookie from dma framework*/
+
+ u32 coalesce_count;
+ u32 poll_timer_freq;
+
+ struct timer_list poll_timer;
+
+ u32 src_avail_descriptors;
+ spinlock_t src_desc_lock; /* For handling srcq available descriptors */
+
+ u32 dst_avail_descriptors;
+ spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
+
+ dma_addr_t src_sgl_bd_pa;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
+ u32 src_sgl_freeidx;
+
+ dma_addr_t dst_sgl_bd_pa;
+ struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
+ u32 dst_sgl_freeidx;
+
+ dma_addr_t src_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
+ u32 src_staprobe_idx;
+ u32 src_sta_hw_probe_idx;
+
+ dma_addr_t dst_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
+ u32 dst_staprobe_idx;
+ u32 dst_sta_hw_probe_idx;
+
+ u32 read_attribute;
+ u32 write_attribute;
+
+ u32 intr_status_offset;
+ u32 intr_control_offset;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
+ u16 idx_ctx_srcq_head;
+ u16 idx_ctx_srcq_tail;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
+ u16 idx_ctx_dstq_head;
+ u16 idx_ctx_dstq_tail;
+
+ spinlock_t pending_list_lock; /* For handling dma pending_list */
+ struct list_head pending_list;
+ spinlock_t active_list_lock; /* For handling dma active_list */
+ struct list_head active_list;
+
+ spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
+ struct list_head pending_interrupts_list;
+ spinlock_t active_interrupts_lock; /* For dma active interrupts list*/
+ struct list_head active_interrupts_list;
+
+ mempool_t *transactions_pool;
+ mempool_t *intr_transactions_pool;
+
+ struct workqueue_struct *sw_intrs_wrkq;
+ struct work_struct handle_sw_intrs;
+
+ struct workqueue_struct *maintenance_workq;
+ struct work_struct handle_chan_reset;
+ struct work_struct handle_chan_shutdown;
+ struct work_struct handle_chan_terminate;
+
+ struct completion chan_shutdown_complt;
+ struct completion chan_terminate_complete;
+
+ struct workqueue_struct *primary_desc_cleanup;
+ struct work_struct handle_primary_desc_cleanup;
+
+ struct workqueue_struct *chan_programming;
+ struct work_struct handle_chan_programming;
+
+ struct workqueue_struct *srcq_desc_cleanup;
+ struct work_struct handle_srcq_desc_cleanup;
+ struct completion srcq_work_complete;
+
+ struct workqueue_struct *dstq_desc_cleanup;
+ struct work_struct handle_dstq_desc_cleanup;
+ struct completion dstq_work_complete;
+};
+
+/*
+ * struct xlnx_pcie_dma_device - Driver specific platform device structure
+ * @is_rootdma: Indicates whether the dma instance is root port dma
+ * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
+ * @bar_mask: Indicates available pcie bars
+ * @board_number: Count value of platform device
+ * @dev: Device structure pointer for pcie device
+ * @channels: Pointer to device DMA channels structure
+ * @common: DMA device structure
+ * @num_channels: Number of channels active for the device
+ * @reg_base: Base address of first DMA channel of the device
+ * @irq_vecs: Number of irq vectors allocated to pci device
+ * @pci_dev: Parent pci device which created this platform device
+ * @bar_info: PCIe bar related information
+ * @platform_irq_vec: Platform irq vector number for root dma
+ * @rootdma_vendor: PCI Vendor id for root dma
+ * @rootdma_device: PCI Device id for root dma
+ */
+struct xlnx_pcie_dma_device {
+ bool is_rootdma;
+ bool dma_buf_ext_addr;
+ u32 bar_mask;
+ u16 board_number;
+ struct device *dev;
+ struct ps_pcie_dma_chan *channels;
+ struct dma_device common;
+ int num_channels;
+ int irq_vecs;
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ struct BAR_PARAMS bar_info[MAX_BARS];
+ int platform_irq_vec;
+ u16 rootdma_vendor;
+ u16 rootdma_device;
+};
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct ps_pcie_dma_chan, common)
+#define to_ps_pcie_dma_tx_descriptor(tx) \
+ container_of(tx, struct ps_pcie_tx_segment, async_tx)
+#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
+ container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
+
+/* Function Prototypes */
+static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
+static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value);
+static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static int irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int irq_probe(struct xlnx_pcie_dma_device *xdev);
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
+static int init_hw_components(struct ps_pcie_dma_chan *chan);
+static int init_sw_components(struct ps_pcie_dma_chan *chan);
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
+static void poll_completed_transactions(unsigned long arg);
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void handle_error(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void ps_pcie_chan_program_work(struct work_struct *work);
+static void dst_cleanup_work(struct work_struct *work);
+static void src_cleanup_work(struct work_struct *work);
+static void ps_pcie_chan_primary_work(struct work_struct *work);
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number);
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
+static void terminate_transactions_work(struct work_struct *work);
+static void chan_shutdown_work(struct work_struct *work);
+static void chan_reset_work(struct work_struct *work);
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
+ struct dma_chan *channel, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags);
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
+
+/* IO accessors */
+static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
+{
+ return ioread32((void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, (void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
+}
+
+static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
+}
+
+/**
+ * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA device structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ (struct xlnx_pcie_dma_device *)data;
+ struct ps_pcie_dma_chan *chan = NULL;
+ int i;
+ int err = -1;
+ int ret = -1;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = ps_pcie_check_intr_status(chan);
+ if (err == 0)
+ ret = 0;
+ }
+
+ return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
+
+ ps_pcie_check_intr_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * chan_intr_setup - Requests Interrupt handler for individual channels
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ struct ps_pcie_dma_chan *chan;
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i),
+ ps_pcie_dma_chan_intr_handler,
+ PS_PCIE_DMA_IRQ_NOSHARE,
+ "PS PCIe DMA Chan Intr handler", chan);
+ if (err) {
+ dev_err(xdev->dev,
+ "Irq %d for chan %d error %d\n",
+ pci_irq_vector(xdev->pci_dev, i),
+ chan->channel_number, err);
+ break;
+ }
+ }
+
+ if (err) {
+ while (--i >= 0) {
+ chan = &xdev->channels[i];
+ devm_free_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i), chan);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * device_intr_setup - Requests interrupt handler for DMA device
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ unsigned long intr_flags = IRQF_SHARED;
+
+ if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
+ intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
+
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, 0),
+ ps_pcie_dma_dev_intr_handler,
+ intr_flags,
+ "PS PCIe DMA Intr Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ pci_irq_vector(xdev->pci_dev, 0));
+
+ return err;
+}
+
+/**
+ * irq_setup - Requests interrupts based on the interrupt type detected
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ if (xdev->irq_vecs == xdev->num_channels)
+ err = chan_intr_setup(xdev);
+ else
+ err = device_intr_setup(xdev);
+
+ return err;
+}
+
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ err = devm_request_irq(xdev->dev,
+ xdev->platform_irq_vec,
+ ps_pcie_dma_dev_intr_handler,
+ IRQF_SHARED,
+ "PS PCIe Root DMA Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ xdev->platform_irq_vec);
+
+ return err;
+}
+
+/**
+ * irq_probe - Checks which interrupt types can be serviced by hardware
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: Number of interrupt vectors when successful or -ENOSPC on failure
+ */
+static int irq_probe(struct xlnx_pcie_dma_device *xdev)
+{
+ struct pci_dev *pdev;
+
+ pdev = xdev->pci_dev;
+
+ xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
+ PCI_IRQ_ALL_TYPES);
+ return xdev->irq_vecs;
+}
+
+/**
+ * ps_pcie_check_intr_status - Checks channel interrupt status
+ *
+ * @chan: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: 0 if interrupt is pending on channel
+ * -1 if no interrupt is pending on channel
+ */
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
+{
+ int err = -1;
+ u32 status;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return err;
+
+ status = ps_pcie_dma_read(chan, chan->intr_status_offset);
+
+ if (status & DMA_INTSTATUS_SGLINTR_BIT) {
+ if (chan->primary_desc_cleanup) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SGLINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_SWINTR_BIT) {
+ if (chan->sw_intrs_wrkq)
+ queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SWINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_DMAERR_BIT) {
+ dev_err(chan->dev,
+ "DMA Channel %d ControlStatus Reg: 0x%x",
+ chan->channel_number, status);
+ dev_err(chan->dev,
+ "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->src_q_limit,
+ chan->chan_base->src_q_size,
+ chan->chan_base->src_q_next);
+ dev_err(chan->dev,
+ "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stas_q_limit,
+ chan->chan_base->stas_q_size,
+ chan->chan_base->stas_q_next);
+ dev_err(chan->dev,
+ "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->dst_q_limit,
+ chan->chan_base->dst_q_size,
+ chan->chan_base->dst_q_next);
+ dev_err(chan->dev,
+ "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stad_q_limit,
+ chan->chan_base->stad_q_size,
+ chan->chan_base->stad_q_next);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT);
+
+ handle_error(chan);
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int init_hw_components(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+ /* Programming SourceQ and StatusQ bd addresses */
+ chan->chan_base->src_q_next = 0;
+ chan->chan_base->src_q_high =
+ upper_32_bits(chan->src_sgl_bd_pa);
+ chan->chan_base->src_q_size = chan->total_descriptors;
+ chan->chan_base->src_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->src_q_low = 0;
+ }
+ chan->chan_base->src_q_low |=
+ (lower_32_bits((chan->src_sgl_bd_pa))
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stas_q_next = 0;
+ chan->chan_base->stas_q_high =
+ upper_32_bits(chan->src_sta_bd_pa);
+ chan->chan_base->stas_q_size = chan->total_descriptors;
+ chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stas_q_low = 0;
+ }
+ chan->chan_base->stas_q_low |=
+ (lower_32_bits(chan->src_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+ /* Programming DestinationQ and StatusQ buffer descriptors */
+ chan->chan_base->dst_q_next = 0;
+ chan->chan_base->dst_q_high =
+ upper_32_bits(chan->dst_sgl_bd_pa);
+ chan->chan_base->dst_q_size = chan->total_descriptors;
+ chan->chan_base->dst_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->dst_q_low = 0;
+ }
+ chan->chan_base->dst_q_low |=
+ (lower_32_bits(chan->dst_sgl_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stad_q_next = 0;
+ chan->chan_base->stad_q_high =
+ upper_32_bits(chan->dst_sta_bd_pa);
+ chan->chan_base->stad_q_size = chan->total_descriptors;
+ chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stad_q_low = 0;
+ }
+ chan->chan_base->stad_q_low |=
+ (lower_32_bits(chan->dst_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ return 0;
+}
+
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+}
+
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+ chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
+}
+
+static int init_sw_components(struct ps_pcie_dma_chan *chan)
+{
+ if ((chan->ppkt_ctx_srcq) && (chan->psrc_sgl_bd) &&
+ (chan->psrc_sta_bd)) {
+ memset(chan->ppkt_ctx_srcq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sgl_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->src_avail_descriptors = chan->total_descriptors;
+
+ chan->src_sgl_freeidx = 0;
+ chan->src_staprobe_idx = 0;
+ chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_srcq_head = 0;
+ chan->idx_ctx_srcq_tail = 0;
+ }
+
+ if ((chan->ppkt_ctx_dstq) && (chan->pdst_sgl_bd) &&
+ (chan->pdst_sta_bd)) {
+ memset(chan->ppkt_ctx_dstq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sgl_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->dst_avail_descriptors = chan->total_descriptors;
+
+ chan->dst_sgl_freeidx = 0;
+ chan->dst_staprobe_idx = 0;
+ chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_dstq_head = 0;
+ chan->idx_ctx_dstq_tail = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ps_pcie_chan_reset - Resets channel, by programming relevant registers
+ *
+ * @chan: PS PCIe DMA channel information holder
+ * Return: void
+ */
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
+{
+ /* Enable channel reset */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+
+ mdelay(10);
+
+ /* Disable channel reset */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+}
+
+/**
+ * poll_completed_transactions - Function invoked by poll timer
+ *
+ * @arg: Pointer to PS PCIe DMA channel information
+ * Return: void
+ */
+static void poll_completed_transactions(unsigned long arg)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)arg;
+
+ if (chan->state == CHANNEL_AVAILABLE) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (seg->tx_elements.src_sgl) {
+ if (chan->src_avail_descriptors >=
+ seg->tx_elements.srcq_num_elemets) {
+ return true;
+ }
+ } else if (seg->tx_elements.dst_sgl) {
+ if (chan->dst_avail_descriptors >=
+ seg->tx_elements.dstq_num_elemets) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if ((chan->src_avail_descriptors >=
+ seg->tx_elements.srcq_num_elemets) &&
+ (chan->dst_avail_descriptors >=
+ seg->tx_elements.dstq_num_elemets)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->num_queues == DEFAULT_DMA_QUEUES)
+ return check_descriptors_for_all_queues(chan, seg);
+ else
+ return check_descriptors_for_two_queues(chan, seg);
+}
+
+static void handle_error(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->state != CHANNEL_AVAILABLE)
+ return;
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_ERROR;
+ spin_unlock(&chan->channel_lock);
+
+ if (chan->maintenance_workq)
+ queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
+}
+
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct SOURCE_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "src pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+ pkt_ctx->sgl = seg->tx_elements.src_sgl;
+
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ /* Get the address of the next available DMA Descriptor */
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->src_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(seg->tx_elements.src_sgl, sgl_ptr,
+ seg->tx_elements.srcq_num_elemets, i) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)sg_dma_address(sgl_ptr);
+ } else {
+ pdesc->system_address =
+ (u32)sg_dma_address(sgl_ptr);
+ }
+
+ pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->read_attribute;
+ if (pkt_ctx->seg)
+ pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
+
+ pdesc->user_handle = chan->idx_ctx_srcq_head;
+ pdesc->user_id = DEFAULT_UID;
+ /* Check if this is last descriptor */
+ if (i == (seg->tx_elements.srcq_num_elemets - 1)) {
+ pkt_ctx->idx_eop = chan->src_sgl_freeidx;
+ pdesc->control_byte_count = pdesc->control_byte_count |
+ SOURCE_CONTROL_BD_EOP_BIT |
+ SOURCE_CONTROL_BD_INTR_BIT;
+ }
+ chan->src_sgl_freeidx++;
+ if (chan->src_sgl_freeidx == chan->total_descriptors)
+ chan->src_sgl_freeidx = 0;
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors--;
+ spin_unlock(&chan->src_desc_lock);
+ }
+
+ chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
+ chan->idx_ctx_srcq_head++;
+ if (chan->idx_ctx_srcq_head == chan->total_descriptors)
+ chan->idx_ctx_srcq_head = 0;
+}
+
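+/*
+ * xlnx_ps_pcie_update_dstq - Claims a destination packet context and fills
+ * destination queue buffer descriptors from the segment's scatter gather
+ * list before advancing the hardware destination queue limit.
+ */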
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct DEST_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "dst pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+ pkt_ctx->sgl = seg->tx_elements.dst_sgl;
+
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(seg->tx_elements.dst_sgl, sgl_ptr,
+ seg->tx_elements.dstq_num_elemets, i) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)sg_dma_address(sgl_ptr);
+ } else {
+ pdesc->system_address =
+ (u32)sg_dma_address(sgl_ptr);
+ }
+
+ pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->write_attribute;
+
+ if (pkt_ctx->seg)
+ pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
+
+ pdesc->user_handle = chan->idx_ctx_dstq_head;
+ /* Check if this is last descriptor */
+ if (i == (seg->tx_elements.dstq_num_elemets - 1))
+ pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
+ chan->dst_sgl_freeidx++;
+ if (chan->dst_sgl_freeidx == chan->total_descriptors)
+ chan->dst_sgl_freeidx = 0;
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors--;
+ spin_unlock(&chan->dst_desc_lock);
+ }
+
+ chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
+ chan->idx_ctx_dstq_head++;
+ if (chan->idx_ctx_dstq_head == chan->total_descriptors)
+ chan->idx_ctx_dstq_head = 0;
+}
+
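+/*
+ * ps_pcie_chan_program_work - Pops segments from the active list while
+ * enough source/destination descriptors are available and programs them
+ * into the hardware queues.
+ */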
+static void ps_pcie_chan_program_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan,
+ handle_chan_programming);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ while (chan->state == CHANNEL_AVAILABLE) {
+ spin_lock(&chan->active_list_lock);
+ seg = list_first_entry_or_null(&chan->active_list,
+ struct ps_pcie_tx_segment, node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (!seg)
+ break;
+
+		if (!check_descriptor_availability(chan, seg))
+ break;
+
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (seg->tx_elements.src_sgl)
+ xlnx_ps_pcie_update_srcq(chan, seg);
+
+ if (seg->tx_elements.dst_sgl)
+ xlnx_ps_pcie_update_dstq(chan, seg);
+ }
+}
+
+/**
+ * dst_cleanup_work - Goes through all completed elements in status Q
+ * and invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void dst_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct DEST_DMA_DESCRIPTOR *pdst_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 dstq_desc_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Destination Err",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Source Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Internal Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+		/*
+		 * 64 bit status q elements are in use, so the upper
+		 * status bits are expected to be non-zero
+		 */
+ if ((psta_bd->status_flag_byte_count &
+ STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+				"Dst Sts Elmnt %d chan %d has zero upper status",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+
+ chan->idx_ctx_dstq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count &
+ STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->dst_staprobe_idx++;
+
+ if (chan->dst_staprobe_idx == chan->total_descriptors)
+ chan->dst_staprobe_idx = 0;
+
+ chan->dst_sta_hw_probe_idx++;
+
+ if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
+ chan->dst_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ dstq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
+ memset(pdst_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors++;
+ spin_unlock(&chan->dst_desc_lock);
+
+ if (dstq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+
+ dstq_desc_idx++;
+
+ if (dstq_desc_idx == chan->total_descriptors)
+ dstq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->requested_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->dstq_work_complete);
+}
+
+/**
+ * src_cleanup_work - Goes through all completed elements in status Q and
+ * invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void src_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 srcq_desc_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Dst Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Source Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Internal Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if ((psta_bd->status_flag_byte_count
+ & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+				"Src Sts Elmnt %d chan %d has zero upper status",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ chan->idx_ctx_srcq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count
+ & STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->src_staprobe_idx++;
+
+ if (chan->src_staprobe_idx == chan->total_descriptors)
+ chan->src_staprobe_idx = 0;
+
+ chan->src_sta_hw_probe_idx++;
+
+ if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
+ chan->src_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ srcq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
+ memset(psrc_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors++;
+ spin_unlock(&chan->src_desc_lock);
+
+ if (srcq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+ srcq_desc_idx++;
+
+ if (srcq_desc_idx == chan->total_descriptors)
+ srcq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->requested_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->srcq_work_complete);
+}
+
+/**
+ * ps_pcie_chan_primary_work - Masks out interrupts, invokes source Q and
+ * destination Q processing and waits for their completion before
+ * re-enabling interrupts. The same work is invoked by the poll timer if the
+ * coalesce count is greater than zero and no interrupt arrives before the
+ * timeout period
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void ps_pcie_chan_primary_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan,
+ handle_primary_desc_cleanup);
+
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->psrc_sgl_bd) {
+ reinit_completion(&chan->srcq_work_complete);
+ if (chan->srcq_desc_cleanup)
+ queue_work(chan->srcq_desc_cleanup,
+ &chan->handle_srcq_desc_cleanup);
+ }
+ if (chan->pdst_sgl_bd) {
+ reinit_completion(&chan->dstq_work_complete);
+ if (chan->dstq_desc_cleanup)
+ queue_work(chan->dstq_desc_cleanup,
+ &chan->handle_dstq_desc_cleanup);
+ }
+
+ if (chan->psrc_sgl_bd)
+ wait_for_completion_interruptible(&chan->srcq_work_complete);
+ if (chan->pdst_sgl_bd)
+ wait_for_completion_interruptible(&chan->dstq_work_complete);
+
+ /* Enable interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->chan_programming) {
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+ }
+
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
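+/*
+ * read_rootdma_config - Sets the DMA masks, maps the "ps_pcie_regbase"
+ * register space, fetches the "ps_pcie_rootdma_intr" interrupt line and
+ * reads the dma_vendorid/dma_deviceid properties for a Root DMA instance.
+ */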
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct resource *r;
+
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = dma_set_coherent_mask(&platform_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
+ "ps_pcie_regbase");
+ if (!r) {
+ dev_err(&platform_dev->dev,
+ "Unable to find memory resource for root dma\n");
+		return -ENODEV;
+ }
+
+ xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
+ if (IS_ERR(xdev->reg_base)) {
+ dev_err(&platform_dev->dev, "ioresource error for root dma\n");
+ return PTR_ERR(xdev->reg_base);
+ }
+
+ xdev->platform_irq_vec =
+ platform_get_irq_byname(platform_dev,
+ "ps_pcie_rootdma_intr");
+ if (xdev->platform_irq_vec < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to get interrupt number for root dma\n");
+ return xdev->platform_irq_vec;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
+ &xdev->rootdma_vendor);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Vendor Id\n");
+ return err;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
+ &xdev->rootdma_device);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Device Id\n");
+ return err;
+ }
+
+ xdev->common.dev = xdev->dev;
+
+ return 0;
+}
+
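+/*
+ * read_epdma_config - Maps the BARs of the PCIe end point, records the BAR
+ * parameters, selects the DMA register BAR and probes interrupt resources
+ * for an end point DMA instance.
+ */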
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct pci_dev *pdev;
+ u16 i;
+ void __iomem * const *pci_iomap;
+ unsigned long pci_bar_length;
+
+ pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
+ xdev->pci_dev = pdev;
+
+ for (i = 0; i < MAX_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ xdev->bar_mask = xdev->bar_mask | (1 << (i));
+ }
+
+ err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
+ return err;
+ }
+
+ pci_iomap = pcim_iomap_table(pdev);
+ if (!pci_iomap) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ for (i = 0; i < MAX_BARS; i++) {
+ pci_bar_length = pci_resource_len(pdev, i);
+ if (pci_bar_length == 0) {
+ xdev->bar_info[i].BAR_LENGTH = 0;
+ xdev->bar_info[i].BAR_PHYS_ADDR = 0;
+ xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
+ } else {
+ xdev->bar_info[i].BAR_LENGTH =
+ pci_bar_length;
+ xdev->bar_info[i].BAR_PHYS_ADDR =
+ pci_resource_start(pdev, i);
+ xdev->bar_info[i].BAR_VIRT_ADDR =
+ pci_iomap[i];
+ }
+ }
+
+ xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
+
+ err = irq_probe(xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
+ platform_dev->id);
+ return err;
+ }
+
+ xdev->common.dev = &pdev->dev;
+
+ return 0;
+}
+
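+/*
+ * probe_channel_properties - Initializes channel locks, lists and
+ * completions, reads the per channel "ps_pcie_channel<N>" property to set
+ * direction, descriptor count, queue count, coalesce count and poll timer
+ * frequency, and adds the channel to the dmaengine device channel list.
+ */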
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number)
+{
+ int i;
+ char propertyname[CHANNEL_PROPERTY_LENGTH];
+ int numvals, ret;
+ u32 *val;
+ struct ps_pcie_dma_chan *channel;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+
+ snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
+ "ps_pcie_channel%d", channel_number);
+
+ channel = &xdev->channels[channel_number];
+
+ spin_lock_init(&channel->channel_lock);
+ spin_lock_init(&channel->cookie_lock);
+
+ INIT_LIST_HEAD(&channel->pending_list);
+ spin_lock_init(&channel->pending_list_lock);
+
+ INIT_LIST_HEAD(&channel->active_list);
+ spin_lock_init(&channel->active_list_lock);
+
+ spin_lock_init(&channel->src_desc_lock);
+ spin_lock_init(&channel->dst_desc_lock);
+
+ INIT_LIST_HEAD(&channel->pending_interrupts_list);
+ spin_lock_init(&channel->pending_interrupts_lock);
+
+ INIT_LIST_HEAD(&channel->active_interrupts_list);
+ spin_lock_init(&channel->active_interrupts_lock);
+
+ init_completion(&channel->srcq_work_complete);
+ init_completion(&channel->dstq_work_complete);
+ init_completion(&channel->chan_shutdown_complt);
+ init_completion(&channel->chan_terminate_complete);
+
+ if (device_property_present(&platform_dev->dev, propertyname)) {
+ numvals = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, NULL, 0);
+
+ if (numvals < 0)
+ return numvals;
+
+ val = devm_kzalloc(&platform_dev->dev, sizeof(u32) * numvals,
+ GFP_KERNEL);
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, val,
+ numvals);
+ if (ret < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to read property %s\n", propertyname);
+ return ret;
+ }
+
+ for (i = 0; i < numvals; i++) {
+ switch (i) {
+ case DMA_CHANNEL_DIRECTION:
+ channel->direction =
+ (val[DMA_CHANNEL_DIRECTION] ==
+ PCIE_AXI_DIRECTION) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ break;
+ case NUM_DESCRIPTORS:
+ channel->total_descriptors =
+ val[NUM_DESCRIPTORS];
+ if (channel->total_descriptors >
+ MAX_DESCRIPTORS) {
+ dev_info(&platform_dev->dev,
+						 "Descriptors > allowed max\n");
+ channel->total_descriptors =
+ MAX_DESCRIPTORS;
+ }
+ break;
+ case NUM_QUEUES:
+ channel->num_queues = val[NUM_QUEUES];
+ switch (channel->num_queues) {
+ case DEFAULT_DMA_QUEUES:
+ break;
+ case TWO_DMA_QUEUES:
+ break;
+ default:
+ dev_info(&platform_dev->dev,
+ "Incorrect Q number for dma chan\n");
+ channel->num_queues = DEFAULT_DMA_QUEUES;
+ }
+ break;
+ case COALESE_COUNT:
+ channel->coalesce_count = val[COALESE_COUNT];
+
+ if (channel->coalesce_count >
+ MAX_COALESCE_COUNT) {
+ dev_info(&platform_dev->dev,
+ "Invalid coalesce Count\n");
+ channel->coalesce_count =
+ MAX_COALESCE_COUNT;
+ }
+ break;
+ case POLL_TIMER_FREQUENCY:
+ channel->poll_timer_freq =
+ val[POLL_TIMER_FREQUENCY];
+ break;
+ default:
+ dev_err(&platform_dev->dev,
+ "Check order of channel properties!\n");
+ }
+ }
+ } else {
+ dev_err(&platform_dev->dev,
+ "Property %s not present. Invalid configuration!\n",
+ propertyname);
+ return -ENOTSUPP;
+ }
+
+ if (channel->direction == DMA_TO_DEVICE) {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_AXI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_INVALID;
+ }
+ } else {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_AXI;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_INVALID;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ }
+ }
+
+ channel->xdev = xdev;
+ channel->channel_number = channel_number;
+
+ if (xdev->is_rootdma) {
+ channel->dev = xdev->dev;
+ channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
+ } else {
+ channel->dev = &xdev->pci_dev->dev;
+ channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
+ }
+
+ channel->chan_base =
+ (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
+ (channel_number * DMA_CHANNEL_REGS_SIZE));
+
+ if (((channel->chan_base->dma_channel_status) &
+ DMA_STATUS_DMA_PRES_BIT) == 0) {
+ dev_err(&platform_dev->dev,
+ "Hardware reports channel not present\n");
+ return -ENOTSUPP;
+ }
+
+ update_channel_read_attribute(channel);
+ update_channel_write_attribute(channel);
+
+ xlnx_match = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct ps_pcie_dma_channel_match),
+ GFP_KERNEL);
+
+ if (!xlnx_match)
+ return -ENOMEM;
+
+ if (xdev->is_rootdma) {
+ xlnx_match->pci_vendorid = xdev->rootdma_vendor;
+ xlnx_match->pci_deviceid = xdev->rootdma_device;
+ } else {
+ xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
+ xlnx_match->pci_deviceid = xdev->pci_dev->device;
+ xlnx_match->bar_params = xdev->bar_info;
+ }
+
+ xlnx_match->board_number = xdev->board_number;
+ xlnx_match->channel_number = channel_number;
+ xlnx_match->direction = xdev->channels[channel_number].direction;
+
+ channel->common.private = (void *)xlnx_match;
+
+ channel->common.device = &xdev->common;
+ list_add_tail(&channel->common.device_node, &xdev->common.channels);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
+{
+ mempool_destroy(chan->transactions_pool);
+
+ mempool_destroy(chan->intr_transactions_pool);
+}
+
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+
+ if (chan->dstq_desc_cleanup)
+ destroy_workqueue(chan->dstq_desc_cleanup);
+
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+}
+
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
+{
+ kfree(chan->ppkt_ctx_srcq);
+
+ kfree(chan->ppkt_ctx_dstq);
+}
+
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
+{
+ ssize_t size;
+
+ if (chan->psrc_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+ }
+
+ if (chan->psrc_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+ }
+
+ if (chan->pdst_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
+ chan->dst_sta_bd_pa);
+ }
+}
+
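+/*
+ * xlnx_ps_pcie_channel_activate - Enables channel interrupts and the DMA
+ * engine, marks the channel available and arms the poll timer when
+ * interrupt coalescing is in use.
+ */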
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
+{
+ u32 reg = chan->coalesce_count;
+
+ reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
+
+ /* Enable Interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ reg | DMA_INTCNTRL_ENABLINTR_BIT |
+ DMA_INTCNTRL_DMAERRINTR_BIT |
+ DMA_INTCNTRL_DMASGINTR_BIT);
+
+ /* Enable DMA */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
+ DMA_CNTRL_ENABL_BIT |
+ DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_AVAILABLE;
+ spin_unlock(&chan->channel_lock);
+
+ /* Activate timer if required */
+ if ((chan->coalesce_count > 0) && !chan->poll_timer.function)
+ xlnx_ps_pcie_alloc_poll_timer(chan);
+
+ return 0;
+}
+
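+/*
+ * xlnx_ps_pcie_channel_quiesce - Disables channel interrupts, drains the
+ * cleanup and programming workqueues, clears sticky interrupt status bits
+ * and disables the DMA engine.
+ */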
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
+{
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ /* Delete timer if it is created */
+	if ((chan->coalesce_count > 0) && chan->poll_timer.function)
+ xlnx_ps_pcie_free_poll_timer(chan);
+
+ /* Flush descriptor cleaning work queues */
+ if (chan->primary_desc_cleanup)
+ flush_workqueue(chan->primary_desc_cleanup);
+
+ /* Flush channel programming work queue */
+ if (chan->chan_programming)
+ flush_workqueue(chan->chan_programming);
+
+ /* Clear the persistent bits */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT |
+ DMA_INTSTATUS_SGLINTR_BIT |
+ DMA_INTSTATUS_SWINTR_BIT);
+
+ /* Disable DMA channel */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_UNAVIALBLE;
+ spin_unlock(&chan->channel_lock);
+}
+
+static u32 total_bytes_in_sgl(struct scatterlist *sgl,
+ unsigned int num_entries)
+{
+ u32 total_bytes = 0;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ for_each_sg(sgl, sgl_ptr, num_entries, i)
+ total_bytes += sg_dma_len(sgl_ptr);
+
+ return total_bytes;
+}
+
+static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt;
+
+ rslt.result = result;
+ rslt.residue = 0;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
+}
+
+static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt, *prslt;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ rslt.result = result;
+ if (seg->tx_elements.src_sgl &&
+ chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue =
+ total_bytes_in_sgl(seg->tx_elements.src_sgl,
+ seg->tx_elements.srcq_num_elemets);
+ prslt = &rslt;
+ } else if (seg->tx_elements.dst_sgl &&
+ chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue =
+ total_bytes_in_sgl(seg->tx_elements.dst_sgl,
+ seg->tx_elements.dstq_num_elemets);
+ prslt = &rslt;
+ } else {
+ prslt = NULL;
+ }
+
+ dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
+}
+
+static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ if (ppkt_ctxt->availability_status == IN_USE) {
+ if (ppkt_ctxt->seg) {
+ ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
+ mempool_free(ppkt_ctxt->seg,
+ chan->transactions_pool);
+ }
+ }
+}
+
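+/*
+ * ivk_cbk_for_pending - Completes every in flight packet context and every
+ * segment on the active and pending lists, including interrupt segments,
+ * with an aborted/failed result so clients are notified before a reset or
+ * terminate.
+ */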
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
+{
+ int i;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
+ struct ps_pcie_tx_segment *seg, *seg_nxt;
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
+ if (chan->ppkt_ctx_srcq) {
+ if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
+ i = chan->idx_ctx_srcq_tail;
+ while (i != chan->idx_ctx_srcq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_srcq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_READ_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ if (chan->ppkt_ctx_dstq) {
+ if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
+ i = chan->idx_ctx_dstq_tail;
+ while (i != chan->idx_ctx_dstq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_dstq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_WRITE_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->pending_list_lock);
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->pending_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->pending_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+}
+
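+/*
+ * xlnx_ps_pcie_reset_channel - Quiesces the channel, fails outstanding
+ * transactions, resets the hardware and reinitializes software and
+ * hardware queue state before reactivating the channel.
+ */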
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
+{
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ ivk_cbk_for_pending(chan);
+
+ ps_pcie_chan_reset(chan);
+
+ init_sw_components(chan);
+ init_hw_components(chan);
+
+ xlnx_ps_pcie_channel_activate(chan);
+}
+
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->poll_timer.function) {
+ del_timer_sync(&chan->poll_timer);
+ chan->poll_timer.function = NULL;
+ }
+}
+
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ init_timer(&chan->poll_timer);
+ chan->poll_timer.function = poll_completed_transactions;
+ chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
+ chan->poll_timer.data = (unsigned long)chan;
+
+ add_timer(&chan->poll_timer);
+
+ return 0;
+}
+
+static void terminate_transactions_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_terminate);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+ ivk_cbk_for_pending(chan);
+ xlnx_ps_pcie_channel_activate(chan);
+
+ complete(&chan->chan_terminate_complete);
+}
+
+static void chan_shutdown_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_shutdown);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ complete(&chan->chan_shutdown_complt);
+}
+
+static void chan_reset_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_reset);
+
+ xlnx_ps_pcie_reset_channel(chan);
+}
+
+static void sw_intr_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_sw_intrs);
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
+ NULL);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ }
+}
+
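+/*
+ * xlnx_ps_pcie_alloc_worker_threads - Creates the per channel single
+ * threaded workqueues used for descriptor programming, primary/source/
+ * destination cleanup, maintenance operations and software interrupts.
+ */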
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
+{
+ char wq_name[WORKQ_NAME_SIZE];
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d descriptor programming wq",
+ chan->channel_number);
+ chan->chan_programming =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->chan_programming) {
+ dev_err(chan->dev,
+ "Unable to create programming wq for chan %d",
+ chan->channel_number);
+ goto err_no_desc_program_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_programming,
+ ps_pcie_chan_program_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d primary cleanup wq", chan->channel_number);
+ chan->primary_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->primary_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create primary cleanup wq for channel %d",
+ chan->channel_number);
+ goto err_no_primary_clean_wq;
+ } else {
+ INIT_WORK(&chan->handle_primary_desc_cleanup,
+ ps_pcie_chan_primary_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d maintenance works wq",
+ chan->channel_number);
+ chan->maintenance_workq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->maintenance_workq) {
+ dev_err(chan->dev,
+ "Unable to create maintenance wq for channel %d",
+ chan->channel_number);
+ goto err_no_maintenance_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
+ INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
+ INIT_WORK(&chan->handle_chan_terminate,
+ terminate_transactions_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d software Interrupts wq",
+ chan->channel_number);
+ chan->sw_intrs_wrkq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->sw_intrs_wrkq) {
+ dev_err(chan->dev,
+ "Unable to create sw interrupts wq for channel %d",
+ chan->channel_number);
+ goto err_no_sw_intrs_wq;
+ } else {
+ INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ if (chan->psrc_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d srcq handling wq",
+ chan->channel_number);
+ chan->srcq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->srcq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create src q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_src_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_srcq_desc_cleanup,
+ src_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d dstq handling wq",
+ chan->channel_number);
+ chan->dstq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->dstq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create dst q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_dst_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_dstq_desc_cleanup,
+ dst_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ return 0;
+err_no_dst_q_completion_wq:
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+err_no_src_q_completion_wq:
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+err_no_sw_intrs_wq:
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+err_no_maintenance_wq:
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+err_no_primary_clean_wq:
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+err_no_desc_program_wq:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
+{
+ chan->transactions_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_tx_segment));
+
+ if (!chan->transactions_pool)
+ goto no_transactions_pool;
+
+ chan->intr_transactions_pool =
+ mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
+ sizeof(struct ps_pcie_intr_segment));
+
+ if (!chan->intr_transactions_pool)
+ goto no_intr_transactions_pool;
+
+ return 0;
+
+no_intr_transactions_pool:
+ mempool_destroy(chan->transactions_pool);
+
+no_transactions_pool:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd) {
+ chan->ppkt_ctx_srcq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_srcq) {
+ dev_err(chan->dev,
+				"Src pkt ctx allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_src_pkt_ctx;
+ }
+ }
+
+ if (chan->pdst_sgl_bd) {
+ chan->ppkt_ctx_dstq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_dstq) {
+ dev_err(chan->dev,
+				"Dst pkt ctx allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_dst_pkt_ctx;
+ }
+ }
+
+ return 0;
+
+err_no_dst_pkt_ctx:
+ kfree(chan->ppkt_ctx_srcq);
+
+err_no_src_pkt_ctx:
+ return -ENOMEM;
+}
+
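+/*
+ * dma_alloc_descriptors_two_queues - Allocates one scatter gather buffer
+ * descriptor ring and one status ring and attaches them to either the
+ * source or the destination queue depending on channel direction.
+ */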
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ void *sgl_base;
+ void *sta_base;
+ dma_addr_t phy_addr_sglbase;
+ dma_addr_t phy_addr_stabase;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+
+ sgl_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_sglbase,
+ GFP_KERNEL);
+
+ if (!sgl_base) {
+ dev_err(chan->dev,
+ "Sgl bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sgl_bds;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ sta_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_stabase,
+ GFP_KERNEL);
+
+ if (!sta_base) {
+ dev_err(chan->dev,
+ "Sta bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sta_bds;
+ }
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ chan->psrc_sgl_bd = sgl_base;
+ chan->src_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->psrc_sta_bd = sta_base;
+ chan->src_sta_bd_pa = phy_addr_stabase;
+
+ chan->pdst_sgl_bd = NULL;
+ chan->dst_sgl_bd_pa = 0;
+
+ chan->pdst_sta_bd = NULL;
+ chan->dst_sta_bd_pa = 0;
+
+ } else if (chan->direction == DMA_FROM_DEVICE) {
+ chan->psrc_sgl_bd = NULL;
+ chan->src_sgl_bd_pa = 0;
+
+ chan->psrc_sta_bd = NULL;
+ chan->src_sta_bd_pa = 0;
+
+ chan->pdst_sgl_bd = sgl_base;
+ chan->dst_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->pdst_sta_bd = sta_base;
+ chan->dst_sta_bd_pa = phy_addr_stabase;
+
+ } else {
+ dev_err(chan->dev,
+ "%d %s() Unsupported channel direction\n",
+ __LINE__, __func__);
+ goto unsupported_channel_direction;
+ }
+
+ return 0;
+
+unsupported_channel_direction:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
+err_no_sta_bds:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
+err_no_sgl_bds:
+
+ return -ENOMEM;
+}
+
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ chan->psrc_sgl_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail src q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
+ chan->pdst_sgl_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail dst q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ chan->psrc_sta_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate src q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sta_descriptors;
+ }
+
+ chan->pdst_sta_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate Dst q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sta_descriptors;
+ }
+
+ return 0;
+
+err_no_dst_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+err_no_src_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+err_no_dst_sgl_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+
+err_no_src_sgl_descriptors:
+ return -ENOMEM;
+}
+
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
+ return;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_shutdown_complt))
+ reinit_completion(&chan->chan_shutdown_complt);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_shutdown);
+ wait_for_completion_interruptible(&chan->chan_shutdown_complt);
+
+ xlnx_ps_pcie_free_worker_queues(chan);
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+ xlnx_ps_pcie_destroy_mempool(chan);
+ xlnx_ps_pcie_free_descriptors(chan);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_RESOURCE_UNALLOCATED;
+ spin_unlock(&chan->channel_lock);
+ }
+}
+
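+/*
+ * xlnx_ps_pcie_dma_alloc_chan_resources - Allocates buffer descriptor
+ * rings, memory pools, packet contexts and worker queues for the channel
+ * and brings it up through a channel reset.
+ */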
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+	if (!dchan)
+		return -EINVAL;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
+ return 0;
+
+ if (chan->num_queues == DEFAULT_DMA_QUEUES) {
+ if (dma_alloc_decriptors_all_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ } else if (chan->num_queues == TWO_DMA_QUEUES) {
+ if (dma_alloc_descriptors_two_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for two queues of channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ }
+
+ if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate memory pool for channel %d\n",
+ chan->channel_number);
+ goto err_no_mempools;
+ }
+
+ if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate packet contexts for channel %d\n",
+ chan->channel_number);
+ goto err_no_pkt_ctxts;
+ }
+
+ if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate worker queues for channel %d\n",
+ chan->channel_number);
+ goto err_no_worker_queues;
+ }
+
+ xlnx_ps_pcie_reset_channel(chan);
+
+ dma_cookie_init(dchan);
+
+ return 0;
+
+err_no_worker_queues:
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+err_no_pkt_ctxts:
+ xlnx_ps_pcie_destroy_mempool(chan);
+err_no_mempools:
+ xlnx_ps_pcie_free_descriptors(chan);
+err_no_descriptors:
+ return -ENOMEM;
+}
+
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_intr_segment *intr_seg =
+ to_ps_pcie_dma_tx_intr_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_interrupts_lock);
+ list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
+ spin_unlock(&chan->pending_interrupts_lock);
+
+ return cookie;
+}
+
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_list_lock);
+ list_add_tail(&seg->node, &chan->pending_list);
+ spin_unlock(&chan->pending_list_lock);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
+ struct dma_chan *channel, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (dst_nents == 0 || src_nents == 0)
+ return NULL;
+
+ if (!dst_sg || !src_sg)
+ return NULL;
+
+ if (chan->num_queues != DEFAULT_DMA_QUEUES) {
+		dev_err(chan->dev,
+			"Only prep_slave_sg is supported for channel %d\n",
+			chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+		dev_err(chan->dev, "Tx segment alloc failed for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->tx_elements.dst_sgl = dst_sg;
+ seg->tx_elements.dstq_num_elemets = dst_nents;
+ seg->tx_elements.src_sgl = src_sg;
+ seg->tx_elements.srcq_num_elemets = src_nents;
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+	if (!is_slave_direction(direction))
+ return NULL;
+
+ if (!sgl || sg_len == 0)
+ return NULL;
+
+ if (chan->num_queues != TWO_DMA_QUEUES) {
+		dev_err(chan->dev, "Only prep_dma_sg is supported for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Unable to allocate tx segment channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ seg->tx_elements.src_sgl = sgl;
+ seg->tx_elements.srcq_num_elemets = sg_len;
+ seg->tx_elements.dst_sgl = NULL;
+ seg->tx_elements.dstq_num_elemets = 0;
+ } else {
+ seg->tx_elements.src_sgl = NULL;
+ seg->tx_elements.srcq_num_elemets = 0;
+ seg->tx_elements.dst_sgl = sgl;
+ seg->tx_elements.dstq_num_elemets = sg_len;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+}
+
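+/*
+ * xlnx_ps_pcie_dma_issue_pending - Moves submitted segments and interrupt
+ * requests from the pending lists to the active lists and kicks the
+ * channel programming work.
+ */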
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return;
+
+ chan = to_xilinx_chan(channel);
+
+ if (!list_empty(&chan->pending_list)) {
+ spin_lock(&chan->pending_list_lock);
+ spin_lock(&chan->active_list_lock);
+ list_splice_tail_init(&chan->pending_list,
+ &chan->active_list);
+ spin_unlock(&chan->active_list_lock);
+ spin_unlock(&chan->pending_list_lock);
+ }
+
+ if (!list_empty(&chan->pending_interrupts_list)) {
+ spin_lock(&chan->pending_interrupts_lock);
+ spin_lock(&chan->active_interrupts_lock);
+ list_splice_tail_init(&chan->pending_interrupts_list,
+ &chan->active_interrupts_list);
+ spin_unlock(&chan->active_interrupts_lock);
+ spin_unlock(&chan->pending_interrupts_lock);
+ }
+
+ if (chan->chan_programming)
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+}
+
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+	if (!channel)
+		return -EINVAL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return 1;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_terminate_complete))
+ reinit_completion(&chan->chan_terminate_complete);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_terminate);
+ wait_for_completion_interruptible(
+ &chan->chan_terminate_complete);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan;
+ struct ps_pcie_intr_segment *intr_segment = NULL;
+
+ if (!channel)
+ return NULL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+	intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
+	if (!intr_segment) {
+		dev_err(chan->dev,
+			"Interrupt segment alloc failed for channel %d\n",
+			chan->channel_number);
+		return NULL;
+	}
+
+	memset(intr_segment, 0, sizeof(*intr_segment));
+
+ dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
+ &chan->common);
+ intr_segment->async_intr_tx.flags = flags;
+ async_tx_ack(&intr_segment->async_intr_tx);
+ intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
+
+ return &intr_segment->async_intr_tx;
+}
+
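+/*
+ * xlnx_pcie_dma_driver_probe - Reads the platform configuration for either
+ * a Root DMA or an end point DMA instance, sets up dmaengine capabilities
+ * and callbacks, probes each channel, requests interrupts and registers
+ * the DMA device.
+ */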
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
+{
+ int err, i;
+ struct xlnx_pcie_dma_device *xdev;
+ static u16 board_number;
+
+ xdev = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
+
+ if (!xdev)
+ return -ENOMEM;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ xdev->dma_buf_ext_addr = true;
+#else
+ xdev->dma_buf_ext_addr = false;
+#endif
+
+ xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
+ "rootdma");
+
+ xdev->dev = &platform_dev->dev;
+ xdev->board_number = board_number;
+
+ err = device_property_read_u32(&platform_dev->dev, "numchannels",
+ &xdev->num_channels);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find numchannels property\n");
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->num_channels == 0 || xdev->num_channels >
+ MAX_ALLOWED_CHANNELS_IN_HW) {
+ dev_warn(&platform_dev->dev,
+			 "Invalid numchannels property value\n");
+ xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
+ }
+
+	xdev->channels = devm_kzalloc(&platform_dev->dev,
+				      sizeof(struct ps_pcie_dma_chan) *
+				      xdev->num_channels,
+				      GFP_KERNEL);
+ if (!xdev->channels) {
+ err = -ENOMEM;
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->is_rootdma)
+ err = read_rootdma_config(platform_dev, xdev);
+ else
+ err = read_epdma_config(platform_dev, xdev);
+
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to initialize dma configuration\n");
+ goto platform_driver_probe_return;
+ }
+
+ /* Initialize the DMA engine */
+ INIT_LIST_HEAD(&xdev->common.channels);
+
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ dma_cap_set(DMA_SG, xdev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
+
+ xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ xdev->common.device_alloc_chan_resources =
+ xlnx_ps_pcie_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xlnx_ps_pcie_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
+ xdev->common.device_tx_status = dma_cookie_status;
+ xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
+ xdev->common.device_prep_dma_interrupt =
+ xlnx_ps_pcie_dma_prep_interrupt;
+ xdev->common.device_prep_dma_sg = xlnx_ps_pcie_dma_prep_dma_sg;
+ xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
+ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ err = probe_channel_properties(platform_dev, xdev, i);
+
+ if (err != 0) {
+ dev_err(xdev->dev,
+ "Unable to read channel properties\n");
+ goto platform_driver_probe_return;
+ }
+ }
+
+ if (xdev->is_rootdma)
+ err = platform_irq_setup(xdev);
+ else
+ err = irq_setup(xdev);
+ if (err) {
+ dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev,
+ "Unable to register board %d with dma framework\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ platform_set_drvdata(platform_dev, xdev);
+
+ board_number++;
+
+ dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
+ return 0;
+
+platform_driver_probe_return:
+ return err;
+}
+
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ platform_get_drvdata(platform_dev);
+ int i;
+
+ for (i = 0; i < xdev->num_channels; i++)
+ xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
+
+ dma_async_device_unregister(&xdev->common);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
+ { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
+#endif
+
+static struct platform_driver xlnx_pcie_dma_driver = {
+ .driver = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
+ .owner = THIS_MODULE,
+ },
+ .probe = xlnx_pcie_dma_driver_probe,
+ .remove = xlnx_pcie_dma_driver_remove,
+};
+
+int dma_platform_driver_register(void)
+{
+ return platform_driver_register(&xlnx_pcie_dma_driver);
+}
+
+void dma_platform_driver_unregister(void)
+{
+ platform_driver_unregister(&xlnx_pcie_dma_driver);
+}
--
2.7.4
Platform driver handles DMA transactions for both PCIe EP DMA and Root DMA
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
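Note for reviewers: below is a minimal client-side sketch of how a consumer
might request one of these channels through the generic dmaengine API,
assuming struct ps_pcie_dma_channel_match is the client-visible match data
this driver exports via chan->private (see include/linux/dma/ps_pcie_dma.h).
The filter callback and helper (ps_pcie_dma_filter, ps_pcie_dma_demo_xfer)
are illustrative only and not part of this patch.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/dma/ps_pcie_dma.h>

/* Illustrative filter: pick a channel by number and transfer direction */
static bool ps_pcie_dma_filter(struct dma_chan *chan, void *param)
{
	struct ps_pcie_dma_channel_match *match = chan->private;
	struct ps_pcie_dma_channel_match *wanted = param;

	return match && match->channel_number == wanted->channel_number &&
	       match->direction == wanted->direction;
}

/*
 * Request a channel, queue one scatter gather transfer and start it.
 * The caller keeps *out_chan and releases it with dma_release_channel()
 * after the descriptor callback reports completion.
 */
static int ps_pcie_dma_demo_xfer(struct ps_pcie_dma_channel_match *wanted,
				 struct scatterlist *sgl, unsigned int sg_len,
				 struct dma_chan **out_chan)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, ps_pcie_dma_filter, wanted);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	*out_chan = chan;
	return 0;
}
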
drivers/dma/xilinx/ps_pcie_platform.c | 3055 +++++++++++++++++++++++++++++++++
1 file changed, 3055 insertions(+)
create mode 100644 drivers/dma/xilinx/ps_pcie_platform.c
diff --git a/drivers/dma/xilinx/ps_pcie_platform.c b/drivers/dma/xilinx/ps_pcie_platform.c
new file mode 100644
index 0000000..79f324a
--- /dev/null
+++ b/drivers/dma/xilinx/ps_pcie_platform.c
@@ -0,0 +1,3055 @@
+/*
+ * XILINX PS PCIe DMA driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is a memory-mapped DMA engine used to execute PS to PL
+ * transfers on ZynqMP UltraScale+ devices
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "ps_pcie.h"
+#include "../dmaengine.h"
+
+#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
+#define MAX_BARS 6
+
+#define DMA_BAR_NUMBER 0
+
+#define MIN_SW_INTR_TRANSACTIONS 2
+
+#define CHANNEL_PROPERTY_LENGTH 50
+#define WORKQ_NAME_SIZE 100
+#define INTR_HANDLR_NAME_SIZE 100
+
+#define PS_PCIE_DMA_IRQ_NOSHARE 0
+
+#define MAX_COALESCE_COUNT 255
+
+#define DMA_CHANNEL_REGS_SIZE 0x80
+
+#define DMA_SRCQPTRLO_REG_OFFSET (0x00) /* Source Q pointer Lo */
+#define DMA_SRCQPTRHI_REG_OFFSET (0x04) /* Source Q pointer Hi */
+#define DMA_SRCQSZ_REG_OFFSET (0x08) /* Source Q size */
+#define DMA_SRCQLMT_REG_OFFSET (0x0C) /* Source Q limit */
+#define DMA_DSTQPTRLO_REG_OFFSET (0x10) /* Destination Q pointer Lo */
+#define DMA_DSTQPTRHI_REG_OFFSET (0x14) /* Destination Q pointer Hi */
+#define DMA_DSTQSZ_REG_OFFSET (0x18) /* Destination Q size */
+#define DMA_DSTQLMT_REG_OFFSET (0x1C) /* Destination Q limit */
+#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
+#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
+#define DMA_SSTAQSZ_REG_OFFSET (0x28) /* Source Status Q size */
+#define DMA_SSTAQLMT_REG_OFFSET (0x2C) /* Source Status Q limit */
+#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
+#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
+#define DMA_DSTAQSZ_REG_OFFSET (0x38) /* Destination Status Q size */
+#define DMA_DSTAQLMT_REG_OFFSET (0x3C) /* Destination Status Q limit */
+#define DMA_SRCQNXT_REG_OFFSET (0x40) /* Source Q next */
+#define DMA_DSTQNXT_REG_OFFSET (0x44) /* Destination Q next */
+#define DMA_SSTAQNXT_REG_OFFSET (0x48) /* Source Status Q next */
+#define DMA_DSTAQNXT_REG_OFFSET (0x4C) /* Destination Status Q next */
+#define DMA_SCRATCH0_REG_OFFSET (0x50) /* Scratch pad register 0 */
+
+#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60) /* DMA PCIe intr control reg */
+#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
+#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68) /* DMA AXI intr control reg */
+#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C) /* DMA AXI intr status reg */
+#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70) /* PCIe intr assert reg */
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74) /* AXI intr assert register */
+#define DMA_CNTRL_REG_OFFSET (0x78) /* DMA control register */
+#define DMA_STATUS_REG_OFFSET (0x7C) /* DMA status register */
+
+#define DMA_CNTRL_RST_BIT BIT(1)
+#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
+#define DMA_CNTRL_ENABL_BIT BIT(0)
+#define DMA_STATUS_DMA_PRES_BIT BIT(15)
+#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
+#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
+#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
+#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
+#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
+#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
+#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
+#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
+#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
+#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
+#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
+#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
+#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
+#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
+#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
+
+#define STA_BD_COMPLETED_BIT BIT(0)
+#define STA_BD_SOURCE_ERROR_BIT BIT(1)
+#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
+#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
+#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
+#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
+
+#define STA_BD_BYTE_COUNT_SHIFT 4
+
+#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
+
+#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
+
+#define MAX_TRANSFER_LENGTH 0x1000000
+
+#define AXI_ATTRIBUTE 0x3
+#define PCI_ATTRIBUTE 0x2
+
+#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
+
+/*
+ * User Id programmed into Source Q will be copied into Status Q of Destination
+ */
+#define DEFAULT_UID 1
+
+/*
+ * DMA channel registers
+ */
+struct DMA_ENGINE_REGISTERS {
+ u32 src_q_low; /* 0x00 */
+ u32 src_q_high; /* 0x04 */
+ u32 src_q_size; /* 0x08 */
+ u32 src_q_limit; /* 0x0C */
+ u32 dst_q_low; /* 0x10 */
+ u32 dst_q_high; /* 0x14 */
+ u32 dst_q_size; /* 0x18 */
+ u32 dst_q_limit; /* 0x1c */
+ u32 stas_q_low; /* 0x20 */
+ u32 stas_q_high; /* 0x24 */
+ u32 stas_q_size; /* 0x28 */
+ u32 stas_q_limit; /* 0x2C */
+ u32 stad_q_low; /* 0x30 */
+ u32 stad_q_high; /* 0x34 */
+ u32 stad_q_size; /* 0x38 */
+ u32 stad_q_limit; /* 0x3C */
+ u32 src_q_next; /* 0x40 */
+ u32 dst_q_next; /* 0x44 */
+ u32 stas_q_next; /* 0x48 */
+ u32 stad_q_next; /* 0x4C */
+ u32 scrathc0; /* 0x50 */
+ u32 scrathc1; /* 0x54 */
+ u32 scrathc2; /* 0x58 */
+ u32 scrathc3; /* 0x5C */
+ u32 pcie_intr_cntrl; /* 0x60 */
+ u32 pcie_intr_status; /* 0x64 */
+ u32 axi_intr_cntrl; /* 0x68 */
+ u32 axi_intr_status; /* 0x6C */
+ u32 pcie_intr_assert; /* 0x70 */
+ u32 axi_intr_assert; /* 0x74 */
+ u32 dma_channel_ctrl; /* 0x78 */
+ u32 dma_channel_status; /* 0x7C */
+} __packed;
+
+/**
+ * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @user_id: User id gets copied to status q of destination
+ */
+struct SOURCE_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __packed;
+
+/**
+ * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @reserved: Reserved field
+ */
+struct DEST_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 reserved;
+} __packed;
+
+/**
+ * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
+ * @status_flag_byte_count: Byte count/buffer length and status flags
+ * @user_handle: User handle gets copied from src/dstq on completion
+ * @user_id: User id gets copied from srcq
+ */
+struct STATUS_DMA_DESCRIPTOR {
+ u32 status_flag_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __packed;
+
+enum PACKET_CONTEXT_AVAILABILITY {
+	FREE = 0, /* Packet transfer parameter context is free */
+	IN_USE /* Packet transfer parameter context is in use */
+};
+
+struct ps_pcie_transfer_elements {
+ struct scatterlist *src_sgl;
+ unsigned int srcq_num_elemets;
+ struct scatterlist *dst_sgl;
+ unsigned int dstq_num_elemets;
+};
+
+struct ps_pcie_tx_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ struct ps_pcie_transfer_elements tx_elements;
+};
+
+struct ps_pcie_intr_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_intr_tx;
+};
+
+/*
+ * The context structure stored for each DMA transaction
+ * This structure is maintained separately for Src Q and Destination Q
+ * @availability_status: Indicates whether packet context is available
+ * @idx_sop: Indicates starting index of buffer descriptor for a transfer
+ * @idx_eop: Indicates ending index of buffer descriptor for a transfer
+ * @sgl: Indicates either src or dst sglist for the transaction
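+ * @seg: Tx segment for this context, used to complete the cookie and
+ *       invoke the dmaengine callback
+ * @requested_bytes: Total number of bytes requested by the transaction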
+ */
+struct PACKET_TRANSFER_PARAMS {
+ enum PACKET_CONTEXT_AVAILABILITY availability_status;
+ u16 idx_sop;
+ u16 idx_eop;
+ struct scatterlist *sgl;
+ struct ps_pcie_tx_segment *seg;
+ u32 requested_bytes;
+};
+
+enum CHANNEL_STATE {
+ CHANNEL_RESOURCE_UNALLOCATED = 0, /* Channel resources not allocated */
+ CHANNEL_UNAVIALBLE, /* Channel inactive */
+ CHANNEL_AVAILABLE, /* Channel available for transfers */
+ CHANNEL_ERROR /* Channel encountered errors */
+};
+
+enum BUFFER_LOCATION {
+ BUFFER_LOC_PCI = 0,
+ BUFFER_LOC_AXI,
+ BUFFER_LOC_INVALID
+};
+
+enum dev_channel_properties {
+ DMA_CHANNEL_DIRECTION = 0,
+ NUM_DESCRIPTORS,
+ NUM_QUEUES,
+ COALESE_COUNT,
+ POLL_TIMER_FREQUENCY
+};
+
+/*
+ * struct ps_pcie_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @dev: The dma device
+ * @common: DMA common channel
+ * @chan_base: Pointer to Channel registers
+ * @channel_number: DMA channel number in the device
+ * @num_queues: Number of queues per channel.
+ * It should be four for memory mapped case and
+ * two for Streaming case
+ * @direction: Transfer direction
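+ * @srcq_buffer_location: Location (PCI/AXI) of buffers described by srcq
+ * @dstq_buffer_location: Location (PCI/AXI) of buffers described by dstq
+ * @total_descriptors: Number of buffer descriptors allocated per queue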
+ * @state: Indicates channel state
+ * @channel_lock: Spin lock to be used before changing channel state
+ * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
+ * @coalesce_count: Indicates number of packet transfers before interrupts
+ * @poll_timer_freq: Indicates frequency of polling for completed transactions
+ * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
+ * @src_avail_descriptors: Available sgl source descriptors
+ * @src_desc_lock: Lock for synchronizing src_avail_descriptors
+ * @dst_avail_descriptors: Available sgl destination descriptors
+ * @dst_desc_lock: Lock for synchronizing
+ * dst_avail_descriptors
+ * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
+ * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
+ * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
+ * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
+ * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
+ * @dst_sgl_freeidx: Holds index of Dst SGL buffer descriptor to be filled
+ * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
+ * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
+ * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
+ * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
+ * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
+ * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
+ * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
+ * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
+ * @read_attribute: Describes the attributes of buffer in srcq
+ * @write_attribute: Describes the attributes of buffer in dstq
+ * @intr_status_offset: Register offset to be checked on receiving interrupt
+ * @intr_control_offset: Register offset to be used to control interrupts
+ * @ppkt_ctx_srcq: Virtual address of packet context to Src Q updates
+ * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
+ * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
+ * @ppkt_ctx_dstq: Virtual address of packet context to Dst Q updates
+ * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
+ * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
+ * @pending_list_lock: Lock to be taken before updating pending transfers list
+ * @pending_list: List of transactions submitted to channel
+ * @active_list_lock: Lock to be taken before transferring transactions from
+ * pending list to active list which will be subsequently
+ * submitted to hardware
+ * @active_list: List of transactions that will be submitted to hardware
+ * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
+ * @pending_interrupts_list: List of interrupt transactions submitted to channel
+ * @active_interrupts_lock: Lock to be taken before transferring transactions
+ * from pending interrupt list to active interrupt list
+ * @active_interrupts_list: List of interrupt transactions that are active
+ * @transactions_pool: Mem pool to allocate dma transactions quickly
+ * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
+ * @sw_intrs_wrkq: Work Q which performs handling of software intrs
+ * @handle_sw_intrs: Work function handling software interrupts
+ * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
+ * @handle_chan_reset: Work that invokes channel reset function
+ * @handle_chan_shutdown: Work that invokes channel shutdown function
+ * @handle_chan_terminate: Work that invokes channel transactions termination
+ * @chan_shutdown_complt: Completion variable which says shutdown is done
+ * @chan_terminate_complete: Completion variable which says terminate is done
+ * @primary_desc_cleanup: Work Q which performs work related to sgl handling
+ * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
+ * and programming
+ * @chan_programming: Work Q which performs work related to channel programming
+ * @handle_chan_programming: Work that invokes channel programming function
+ * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
+ * @handle_srcq_desc_cleanup: Work function handling Src Q completions
+ * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
+ * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
+ * @srcq_work_complete: Src Q Work completion variable for primary work
+ * @dstq_work_complete: Dst Q Work completion variable for primary work
+ */
+struct ps_pcie_dma_chan {
+ struct xlnx_pcie_dma_device *xdev;
+ struct device *dev;
+
+ struct dma_chan common;
+
+ struct DMA_ENGINE_REGISTERS *chan_base;
+ u16 channel_number;
+
+ u32 num_queues;
+ enum dma_data_direction direction;
+ enum BUFFER_LOCATION srcq_buffer_location;
+ enum BUFFER_LOCATION dstq_buffer_location;
+
+ u32 total_descriptors;
+
+ enum CHANNEL_STATE state;
+ spinlock_t channel_lock; /* For changing channel state */
+
+ spinlock_t cookie_lock; /* For acquiring cookie from dma framework*/
+
+ u32 coalesce_count;
+ u32 poll_timer_freq;
+
+ struct timer_list poll_timer;
+
+ u32 src_avail_descriptors;
+ spinlock_t src_desc_lock; /* For handling srcq available descriptors */
+
+ u32 dst_avail_descriptors;
+ spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
+
+ dma_addr_t src_sgl_bd_pa;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
+ u32 src_sgl_freeidx;
+
+ dma_addr_t dst_sgl_bd_pa;
+ struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
+ u32 dst_sgl_freeidx;
+
+ dma_addr_t src_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
+ u32 src_staprobe_idx;
+ u32 src_sta_hw_probe_idx;
+
+ dma_addr_t dst_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
+ u32 dst_staprobe_idx;
+ u32 dst_sta_hw_probe_idx;
+
+ u32 read_attribute;
+ u32 write_attribute;
+
+ u32 intr_status_offset;
+ u32 intr_control_offset;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
+ u16 idx_ctx_srcq_head;
+ u16 idx_ctx_srcq_tail;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
+ u16 idx_ctx_dstq_head;
+ u16 idx_ctx_dstq_tail;
+
+ spinlock_t pending_list_lock; /* For handling dma pending_list */
+ struct list_head pending_list;
+ spinlock_t active_list_lock; /* For handling dma active_list */
+ struct list_head active_list;
+
+ spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
+ struct list_head pending_interrupts_list;
+ spinlock_t active_interrupts_lock; /* For dma active interrupts list*/
+ struct list_head active_interrupts_list;
+
+ mempool_t *transactions_pool;
+ mempool_t *intr_transactions_pool;
+
+ struct workqueue_struct *sw_intrs_wrkq;
+ struct work_struct handle_sw_intrs;
+
+ struct workqueue_struct *maintenance_workq;
+ struct work_struct handle_chan_reset;
+ struct work_struct handle_chan_shutdown;
+ struct work_struct handle_chan_terminate;
+
+ struct completion chan_shutdown_complt;
+ struct completion chan_terminate_complete;
+
+ struct workqueue_struct *primary_desc_cleanup;
+ struct work_struct handle_primary_desc_cleanup;
+
+ struct workqueue_struct *chan_programming;
+ struct work_struct handle_chan_programming;
+
+ struct workqueue_struct *srcq_desc_cleanup;
+ struct work_struct handle_srcq_desc_cleanup;
+ struct completion srcq_work_complete;
+
+ struct workqueue_struct *dstq_desc_cleanup;
+ struct work_struct handle_dstq_desc_cleanup;
+ struct completion dstq_work_complete;
+};
+
+/*
+ * struct xlnx_pcie_dma_device - Driver specific platform device structure
+ * @is_rootdma: Indicates whether the dma instance is root port dma
+ * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
+ * @bar_mask: Indicates available pcie bars
+ * @board_number: Count value of platform device
+ * @dev: Device structure pointer for pcie device
+ * @channels: Pointer to device DMA channels structure
+ * @common: DMA device structure
+ * @num_channels: Number of channels active for the device
+ * @reg_base: Base address of first DMA channel of the device
+ * @irq_vecs: Number of irq vectors allocated to pci device
+ * @pci_dev: Parent pci device which created this platform device
+ * @bar_info: PCIe bar related information
+ * @platform_irq_vec: Platform irq vector number for root dma
+ * @rootdma_vendor: PCI Vendor id for root dma
+ * @rootdma_device: PCI Device id for root dma
+ */
+struct xlnx_pcie_dma_device {
+ bool is_rootdma;
+ bool dma_buf_ext_addr;
+ u32 bar_mask;
+ u16 board_number;
+ struct device *dev;
+ struct ps_pcie_dma_chan *channels;
+ struct dma_device common;
+ int num_channels;
+ int irq_vecs;
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ struct BAR_PARAMS bar_info[MAX_BARS];
+ int platform_irq_vec;
+ u16 rootdma_vendor;
+ u16 rootdma_device;
+};
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct ps_pcie_dma_chan, common)
+#define to_ps_pcie_dma_tx_descriptor(tx) \
+ container_of(tx, struct ps_pcie_tx_segment, async_tx)
+#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
+ container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
+
+/* Function Prototypes */
+static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
+static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value);
+static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static int irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int irq_probe(struct xlnx_pcie_dma_device *xdev);
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
+static int init_hw_components(struct ps_pcie_dma_chan *chan);
+static int init_sw_components(struct ps_pcie_dma_chan *chan);
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
+static void poll_completed_transactions(unsigned long arg);
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void handle_error(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void ps_pcie_chan_program_work(struct work_struct *work);
+static void dst_cleanup_work(struct work_struct *work);
+static void src_cleanup_work(struct work_struct *work);
+static void ps_pcie_chan_primary_work(struct work_struct *work);
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number);
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
+static void terminate_transactions_work(struct work_struct *work);
+static void chan_shutdown_work(struct work_struct *work);
+static void chan_reset_work(struct work_struct *work);
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
+ struct dma_chan *channel, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags);
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
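+
+/*
+ * Transaction flow summary:
+ * 1. A dmaengine client prepares a transfer through one of the prep
+ *    callbacks, which builds a ps_pcie_tx_segment from the channel's
+ *    transactions pool.
+ * 2. tx_submit() assigns a cookie and places the segment on pending_list;
+ *    issue_pending() moves segments to active_list, from where the channel
+ *    programming work picks them up.
+ * 3. Channel programming work fills src/dst buffer descriptors and advances
+ *    the hardware queue limit registers.
+ * 4. On interrupt or poll timer expiry, the primary cleanup work runs the
+ *    source/destination status queue cleanup works, which complete cookies
+ *    and invoke client callbacks.
+ */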
+
+/* IO accessors */
+static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
+{
+ return ioread32((void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, (void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
+}
+
+static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
+}
+
+/**
+ * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA device structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ (struct xlnx_pcie_dma_device *)data;
+ struct ps_pcie_dma_chan *chan = NULL;
+ int i;
+ int err = -1;
+ int ret = -1;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = ps_pcie_check_intr_status(chan);
+ if (err == 0)
+ ret = 0;
+ }
+
+ return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
+
+ ps_pcie_check_intr_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * chan_intr_setup - Requests Interrupt handler for individual channels
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ struct ps_pcie_dma_chan *chan;
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i),
+ ps_pcie_dma_chan_intr_handler,
+ PS_PCIE_DMA_IRQ_NOSHARE,
+ "PS PCIe DMA Chan Intr handler", chan);
+ if (err) {
+ dev_err(xdev->dev,
+ "Irq %d for chan %d error %d\n",
+ pci_irq_vector(xdev->pci_dev, i),
+ chan->channel_number, err);
+ break;
+ }
+ }
+
+ if (err) {
+ while (--i >= 0) {
+ chan = &xdev->channels[i];
+ devm_free_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i), chan);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * device_intr_setup - Requests interrupt handler for DMA device
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ unsigned long intr_flags = IRQF_SHARED;
+
+ if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
+ intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
+
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, 0),
+ ps_pcie_dma_dev_intr_handler,
+ intr_flags,
+ "PS PCIe DMA Intr Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ pci_irq_vector(xdev->pci_dev, 0));
+
+ return err;
+}
+
+/**
+ * irq_setup - Requests interrupts based on the interrupt type detected
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
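+	/*
+	 * With one interrupt vector per channel (MSI-X), each channel gets
+	 * a dedicated handler; otherwise a single device level handler
+	 * checks all channels for pending interrupt status.
+	 */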
+ if (xdev->irq_vecs == xdev->num_channels)
+ err = chan_intr_setup(xdev);
+ else
+ err = device_intr_setup(xdev);
+
+ return err;
+}
+
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ err = devm_request_irq(xdev->dev,
+ xdev->platform_irq_vec,
+ ps_pcie_dma_dev_intr_handler,
+ IRQF_SHARED,
+ "PS PCIe Root DMA Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ xdev->platform_irq_vec);
+
+ return err;
+}
+
+/**
+ * irq_probe - Checks which interrupt types can be serviced by hardware
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: Number of interrupt vectors when successful or -ENOSPC on failure
+ */
+static int irq_probe(struct xlnx_pcie_dma_device *xdev)
+{
+ struct pci_dev *pdev;
+
+ pdev = xdev->pci_dev;
+
+ xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
+ PCI_IRQ_ALL_TYPES);
+ return xdev->irq_vecs;
+}
+
+/**
+ * ps_pcie_check_intr_status - Checks channel interrupt status
+ *
+ * @chan: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: 0 if interrupt is pending on channel
+ * -1 if no interrupt is pending on channel
+ */
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
+{
+ int err = -1;
+ u32 status;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return err;
+
+ status = ps_pcie_dma_read(chan, chan->intr_status_offset);
+
+ if (status & DMA_INTSTATUS_SGLINTR_BIT) {
+ if (chan->primary_desc_cleanup) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SGLINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_SWINTR_BIT) {
+ if (chan->sw_intrs_wrkq)
+ queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SWINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_DMAERR_BIT) {
+ dev_err(chan->dev,
+ "DMA Channel %d ControlStatus Reg: 0x%x",
+ chan->channel_number, status);
+ dev_err(chan->dev,
+ "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->src_q_limit,
+ chan->chan_base->src_q_size,
+ chan->chan_base->src_q_next);
+ dev_err(chan->dev,
+ "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stas_q_limit,
+ chan->chan_base->stas_q_size,
+ chan->chan_base->stas_q_next);
+ dev_err(chan->dev,
+ "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->dst_q_limit,
+ chan->chan_base->dst_q_size,
+ chan->chan_base->dst_q_next);
+ dev_err(chan->dev,
+ "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stad_q_limit,
+ chan->chan_base->stad_q_size,
+ chan->chan_base->stad_q_next);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT);
+
+ handle_error(chan);
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int init_hw_components(struct ps_pcie_dma_chan *chan)
+{
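+	/*
+	 * Program each queue with its descriptor ring base address and size.
+	 * Data queue limits start at zero and advance as descriptors are
+	 * filled, while status queue limits start at total_descriptors - 1
+	 * so hardware may write completions into the whole status ring.
+	 */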
+ if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+ /* Programming SourceQ and StatusQ bd addresses */
+ chan->chan_base->src_q_next = 0;
+ chan->chan_base->src_q_high =
+ upper_32_bits(chan->src_sgl_bd_pa);
+ chan->chan_base->src_q_size = chan->total_descriptors;
+ chan->chan_base->src_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->src_q_low = 0;
+ }
+ chan->chan_base->src_q_low |=
+ (lower_32_bits((chan->src_sgl_bd_pa))
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stas_q_next = 0;
+ chan->chan_base->stas_q_high =
+ upper_32_bits(chan->src_sta_bd_pa);
+ chan->chan_base->stas_q_size = chan->total_descriptors;
+ chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stas_q_low = 0;
+ }
+ chan->chan_base->stas_q_low |=
+ (lower_32_bits(chan->src_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+ /* Programming DestinationQ and StatusQ buffer descriptors */
+ chan->chan_base->dst_q_next = 0;
+ chan->chan_base->dst_q_high =
+ upper_32_bits(chan->dst_sgl_bd_pa);
+ chan->chan_base->dst_q_size = chan->total_descriptors;
+ chan->chan_base->dst_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->dst_q_low = 0;
+ }
+ chan->chan_base->dst_q_low |=
+ (lower_32_bits(chan->dst_sgl_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stad_q_next = 0;
+ chan->chan_base->stad_q_high =
+ upper_32_bits(chan->dst_sta_bd_pa);
+ chan->chan_base->stad_q_size = chan->total_descriptors;
+ chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stad_q_low = 0;
+ }
+ chan->chan_base->stad_q_low |=
+ (lower_32_bits(chan->dst_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ return 0;
+}
+
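+/*
+ * Read/write attributes encode, in each buffer descriptor control word,
+ * which interface (PCIe or AXI) the buffer described by the descriptor
+ * resides on, so the DMA engine issues reads and writes on the correct side.
+ */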
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+}
+
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+		} else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+ chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
+}
+
+static int init_sw_components(struct ps_pcie_dma_chan *chan)
+{
+ if ((chan->ppkt_ctx_srcq) && (chan->psrc_sgl_bd) &&
+ (chan->psrc_sta_bd)) {
+ memset(chan->ppkt_ctx_srcq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sgl_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->src_avail_descriptors = chan->total_descriptors;
+
+ chan->src_sgl_freeidx = 0;
+ chan->src_staprobe_idx = 0;
+ chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_srcq_head = 0;
+ chan->idx_ctx_srcq_tail = 0;
+ }
+
+ if ((chan->ppkt_ctx_dstq) && (chan->pdst_sgl_bd) &&
+ (chan->pdst_sta_bd)) {
+ memset(chan->ppkt_ctx_dstq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sgl_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->dst_avail_descriptors = chan->total_descriptors;
+
+ chan->dst_sgl_freeidx = 0;
+ chan->dst_staprobe_idx = 0;
+ chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_dstq_head = 0;
+ chan->idx_ctx_dstq_tail = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ps_pcie_chan_reset - Resets channel, by programming relevant registers
+ *
+ * @chan: PS PCIe DMA channel information holder
+ * Return: void
+ */
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
+{
+ /* Enable channel reset */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+
+ mdelay(10);
+
+ /* Disable channel reset */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+}
+
+/**
+ * poll_completed_transactions - Function invoked by poll timer
+ *
+ * @arg: Pointer to PS PCIe DMA channel information
+ * Return: void
+ */
+static void poll_completed_transactions(unsigned long arg)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)arg;
+
+ if (chan->state == CHANNEL_AVAILABLE) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (seg->tx_elements.src_sgl) {
+ if (chan->src_avail_descriptors >=
+ seg->tx_elements.srcq_num_elemets) {
+ return true;
+ }
+ } else if (seg->tx_elements.dst_sgl) {
+ if (chan->dst_avail_descriptors >=
+ seg->tx_elements.dstq_num_elemets) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if ((chan->src_avail_descriptors >=
+ seg->tx_elements.srcq_num_elemets) &&
+ (chan->dst_avail_descriptors >=
+ seg->tx_elements.dstq_num_elemets)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->num_queues == DEFAULT_DMA_QUEUES)
+ return check_descriptors_for_all_queues(chan, seg);
+ else
+ return check_descriptors_for_two_queues(chan, seg);
+}
+
+static void handle_error(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->state != CHANNEL_AVAILABLE)
+ return;
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_ERROR;
+ spin_unlock(&chan->channel_lock);
+
+ if (chan->maintenance_workq)
+ queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
+}
+
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct SOURCE_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "src pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+ pkt_ctx->sgl = seg->tx_elements.src_sgl;
+
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ /* Get the address of the next available DMA Descriptor */
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->src_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(seg->tx_elements.src_sgl, sgl_ptr,
+ seg->tx_elements.srcq_num_elemets, i) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)sg_dma_address(sgl_ptr);
+ } else {
+ pdesc->system_address =
+ (u32)sg_dma_address(sgl_ptr);
+ }
+
+ pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->read_attribute;
+ if (pkt_ctx->seg)
+ pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
+
+ pdesc->user_handle = chan->idx_ctx_srcq_head;
+ pdesc->user_id = DEFAULT_UID;
+ /* Check if this is last descriptor */
+ if (i == (seg->tx_elements.srcq_num_elemets - 1)) {
+ pkt_ctx->idx_eop = chan->src_sgl_freeidx;
+ pdesc->control_byte_count = pdesc->control_byte_count |
+ SOURCE_CONTROL_BD_EOP_BIT |
+ SOURCE_CONTROL_BD_INTR_BIT;
+ }
+ chan->src_sgl_freeidx++;
+ if (chan->src_sgl_freeidx == chan->total_descriptors)
+ chan->src_sgl_freeidx = 0;
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors--;
+ spin_unlock(&chan->src_desc_lock);
+ }
+
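+	/* Advancing the limit hands the filled descriptors to hardware */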
+ chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
+ chan->idx_ctx_srcq_head++;
+ if (chan->idx_ctx_srcq_head == chan->total_descriptors)
+ chan->idx_ctx_srcq_head = 0;
+}
+
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct DEST_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "dst pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+ pkt_ctx->sgl = seg->tx_elements.dst_sgl;
+
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(seg->tx_elements.dst_sgl, sgl_ptr,
+ seg->tx_elements.dstq_num_elemets, i) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)sg_dma_address(sgl_ptr);
+ } else {
+ pdesc->system_address =
+ (u32)sg_dma_address(sgl_ptr);
+ }
+
+ pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->write_attribute;
+
+ if (pkt_ctx->seg)
+ pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
+
+ pdesc->user_handle = chan->idx_ctx_dstq_head;
+ /* Check if this is last descriptor */
+ if (i == (seg->tx_elements.dstq_num_elemets - 1))
+ pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
+ chan->dst_sgl_freeidx++;
+ if (chan->dst_sgl_freeidx == chan->total_descriptors)
+ chan->dst_sgl_freeidx = 0;
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors--;
+ spin_unlock(&chan->dst_desc_lock);
+ }
+
+ chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
+ chan->idx_ctx_dstq_head++;
+ if (chan->idx_ctx_dstq_head == chan->total_descriptors)
+ chan->idx_ctx_dstq_head = 0;
+}
+
+static void ps_pcie_chan_program_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan,
+ handle_chan_programming);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ while (chan->state == CHANNEL_AVAILABLE) {
+ spin_lock(&chan->active_list_lock);
+ seg = list_first_entry_or_null(&chan->active_list,
+ struct ps_pcie_tx_segment, node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (!seg)
+ break;
+
+ if (check_descriptor_availability(chan, seg) == false)
+ break;
+
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (seg->tx_elements.src_sgl)
+ xlnx_ps_pcie_update_srcq(chan, seg);
+
+ if (seg->tx_elements.dst_sgl)
+ xlnx_ps_pcie_update_dstq(chan, seg);
+ }
+}
+
+/**
+ * dst_cleanup_work - Goes through all completed elements in status Q
+ * and invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void dst_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct DEST_DMA_DESCRIPTOR *pdst_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 dstq_desc_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Destination Err",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Source Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Internal Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+		/*
+		 * Upper status dword must be nonzero when 64 bit status
+		 * queue elements are in use
+		 */
+ if ((psta_bd->status_flag_byte_count &
+ STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d for chan %d has NON ZERO",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+
+ chan->idx_ctx_dstq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count &
+ STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->dst_staprobe_idx++;
+
+ if (chan->dst_staprobe_idx == chan->total_descriptors)
+ chan->dst_staprobe_idx = 0;
+
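+		/*
+		 * Advancing the status queue limit returns the consumed
+		 * status element to hardware
+		 */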
+ chan->dst_sta_hw_probe_idx++;
+
+ if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
+ chan->dst_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ dstq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
+ memset(pdst_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors++;
+ spin_unlock(&chan->dst_desc_lock);
+
+ if (dstq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+
+ dstq_desc_idx++;
+
+ if (dstq_desc_idx == chan->total_descriptors)
+ dstq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->requested_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->dstq_work_complete);
+}
+
+/**
+ * src_cleanup_work - Goes through all completed elements in status Q and
+ * invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void src_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 srcq_desc_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Dst Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Source Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Internal Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if ((psta_bd->status_flag_byte_count
+ & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has NonZero",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ chan->idx_ctx_srcq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count
+ & STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->src_staprobe_idx++;
+
+ if (chan->src_staprobe_idx == chan->total_descriptors)
+ chan->src_staprobe_idx = 0;
+
+ chan->src_sta_hw_probe_idx++;
+
+ if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
+ chan->src_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ srcq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
+ memset(psrc_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors++;
+ spin_unlock(&chan->src_desc_lock);
+
+ if (srcq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+ srcq_desc_idx++;
+
+ if (srcq_desc_idx == chan->total_descriptors)
+ srcq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->requested_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->srcq_work_complete);
+}
+
+/**
+ * ps_pcie_chan_primary_work - Masks out interrupts, invokes source Q and
+ * destination Q processing. Waits for source Q and destination Q processing
+ * and re enables interrupts. Same work is invoked by timer if coalesce count
+ * is greater than zero and interrupts are not invoked before the timeout period
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void ps_pcie_chan_primary_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan,
+ handle_primary_desc_cleanup);
+
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->psrc_sgl_bd) {
+ reinit_completion(&chan->srcq_work_complete);
+ if (chan->srcq_desc_cleanup)
+ queue_work(chan->srcq_desc_cleanup,
+ &chan->handle_srcq_desc_cleanup);
+ }
+ if (chan->pdst_sgl_bd) {
+ reinit_completion(&chan->dstq_work_complete);
+ if (chan->dstq_desc_cleanup)
+ queue_work(chan->dstq_desc_cleanup,
+ &chan->handle_dstq_desc_cleanup);
+ }
+
+ if (chan->psrc_sgl_bd)
+ wait_for_completion_interruptible(&chan->srcq_work_complete);
+ if (chan->pdst_sgl_bd)
+ wait_for_completion_interruptible(&chan->dstq_work_complete);
+
+ /* Enable interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->chan_programming) {
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+ }
+
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct resource *r;
+
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = dma_set_coherent_mask(&platform_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
+ "ps_pcie_regbase");
+ if (!r) {
+ dev_err(&platform_dev->dev,
+ "Unable to find memory resource for root dma\n");
+		return -ENODEV;
+ }
+
+ xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
+ if (IS_ERR(xdev->reg_base)) {
+ dev_err(&platform_dev->dev, "ioresource error for root dma\n");
+ return PTR_ERR(xdev->reg_base);
+ }
+
+ xdev->platform_irq_vec =
+ platform_get_irq_byname(platform_dev,
+ "ps_pcie_rootdma_intr");
+ if (xdev->platform_irq_vec < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to get interrupt number for root dma\n");
+ return xdev->platform_irq_vec;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
+ &xdev->rootdma_vendor);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Vendor Id\n");
+ return err;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
+ &xdev->rootdma_device);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Device Id\n");
+ return err;
+ }
+
+ xdev->common.dev = xdev->dev;
+
+ return 0;
+}
+
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct pci_dev *pdev;
+ u16 i;
+ void __iomem * const *pci_iomap;
+ unsigned long pci_bar_length;
+
+ pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
+ xdev->pci_dev = pdev;
+
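+	/* Build a mask of the BARs actually implemented by the device */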
+ for (i = 0; i < MAX_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ xdev->bar_mask = xdev->bar_mask | (1 << (i));
+ }
+
+ err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
+ return err;
+ }
+
+ pci_iomap = pcim_iomap_table(pdev);
+ if (!pci_iomap) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ for (i = 0; i < MAX_BARS; i++) {
+ pci_bar_length = pci_resource_len(pdev, i);
+ if (pci_bar_length == 0) {
+ xdev->bar_info[i].BAR_LENGTH = 0;
+ xdev->bar_info[i].BAR_PHYS_ADDR = 0;
+ xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
+ } else {
+ xdev->bar_info[i].BAR_LENGTH =
+ pci_bar_length;
+ xdev->bar_info[i].BAR_PHYS_ADDR =
+ pci_resource_start(pdev, i);
+ xdev->bar_info[i].BAR_VIRT_ADDR =
+ pci_iomap[i];
+ }
+ }
+
+ xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
+
+ err = irq_probe(xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
+ platform_dev->id);
+ return err;
+ }
+
+ xdev->common.dev = &pdev->dev;
+
+ return 0;
+}
+
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number)
+{
+ int i;
+ char propertyname[CHANNEL_PROPERTY_LENGTH];
+ int numvals, ret;
+ u32 *val;
+ struct ps_pcie_dma_chan *channel;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+
+ snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
+ "ps_pcie_channel%d", channel_number);
+
+ channel = &xdev->channels[channel_number];
+
+ spin_lock_init(&channel->channel_lock);
+ spin_lock_init(&channel->cookie_lock);
+
+ INIT_LIST_HEAD(&channel->pending_list);
+ spin_lock_init(&channel->pending_list_lock);
+
+ INIT_LIST_HEAD(&channel->active_list);
+ spin_lock_init(&channel->active_list_lock);
+
+ spin_lock_init(&channel->src_desc_lock);
+ spin_lock_init(&channel->dst_desc_lock);
+
+ INIT_LIST_HEAD(&channel->pending_interrupts_list);
+ spin_lock_init(&channel->pending_interrupts_lock);
+
+ INIT_LIST_HEAD(&channel->active_interrupts_list);
+ spin_lock_init(&channel->active_interrupts_lock);
+
+ init_completion(&channel->srcq_work_complete);
+ init_completion(&channel->dstq_work_complete);
+ init_completion(&channel->chan_shutdown_complt);
+ init_completion(&channel->chan_terminate_complete);
+
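+	/*
+	 * Each channel is described by a "ps_pcie_channel<n>" u32 array
+	 * property whose entries follow enum dev_channel_properties:
+	 * direction, descriptor count, queue count, coalesce count and
+	 * poll timer frequency, in that order.
+	 */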
+ if (device_property_present(&platform_dev->dev, propertyname)) {
+ numvals = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, NULL, 0);
+
+ if (numvals < 0)
+ return numvals;
+
+ val = devm_kzalloc(&platform_dev->dev, sizeof(u32) * numvals,
+ GFP_KERNEL);
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, val,
+ numvals);
+ if (ret < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to read property %s\n", propertyname);
+ return ret;
+ }
+
+ for (i = 0; i < numvals; i++) {
+ switch (i) {
+ case DMA_CHANNEL_DIRECTION:
+ channel->direction =
+ (val[DMA_CHANNEL_DIRECTION] ==
+ PCIE_AXI_DIRECTION) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ break;
+ case NUM_DESCRIPTORS:
+ channel->total_descriptors =
+ val[NUM_DESCRIPTORS];
+ if (channel->total_descriptors >
+ MAX_DESCRIPTORS) {
+ dev_info(&platform_dev->dev,
+ "Descriptors > alowd max\n");
+ channel->total_descriptors =
+ MAX_DESCRIPTORS;
+ }
+ break;
+ case NUM_QUEUES:
+ channel->num_queues = val[NUM_QUEUES];
+ switch (channel->num_queues) {
+ case DEFAULT_DMA_QUEUES:
+ break;
+ case TWO_DMA_QUEUES:
+ break;
+ default:
+ dev_info(&platform_dev->dev,
+ "Incorrect Q number for dma chan\n");
+ channel->num_queues = DEFAULT_DMA_QUEUES;
+ }
+ break;
+ case COALESE_COUNT:
+ channel->coalesce_count = val[COALESE_COUNT];
+
+ if (channel->coalesce_count >
+ MAX_COALESCE_COUNT) {
+ dev_info(&platform_dev->dev,
+ "Invalid coalesce Count\n");
+ channel->coalesce_count =
+ MAX_COALESCE_COUNT;
+ }
+ break;
+ case POLL_TIMER_FREQUENCY:
+ channel->poll_timer_freq =
+ val[POLL_TIMER_FREQUENCY];
+ break;
+ default:
+ dev_err(&platform_dev->dev,
+ "Check order of channel properties!\n");
+ }
+ }
+ } else {
+ dev_err(&platform_dev->dev,
+ "Property %s not present. Invalid configuration!\n",
+ propertyname);
+ return -ENOTSUPP;
+ }
+
+ if (channel->direction == DMA_TO_DEVICE) {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_AXI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_INVALID;
+ }
+ } else {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_AXI;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_INVALID;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ }
+ }
+
+ channel->xdev = xdev;
+ channel->channel_number = channel_number;
+
+ if (xdev->is_rootdma) {
+ channel->dev = xdev->dev;
+ channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
+ } else {
+ channel->dev = &xdev->pci_dev->dev;
+ channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
+ }
+
+ channel->chan_base =
+ (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
+ (channel_number * DMA_CHANNEL_REGS_SIZE));
+
+ if (((channel->chan_base->dma_channel_status) &
+ DMA_STATUS_DMA_PRES_BIT) == 0) {
+ dev_err(&platform_dev->dev,
+ "Hardware reports channel not present\n");
+ return -ENOTSUPP;
+ }
+
+ update_channel_read_attribute(channel);
+ update_channel_write_attribute(channel);
+
+ xlnx_match = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct ps_pcie_dma_channel_match),
+ GFP_KERNEL);
+
+ if (!xlnx_match)
+ return -ENOMEM;
+
+ if (xdev->is_rootdma) {
+ xlnx_match->pci_vendorid = xdev->rootdma_vendor;
+ xlnx_match->pci_deviceid = xdev->rootdma_device;
+ } else {
+ xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
+ xlnx_match->pci_deviceid = xdev->pci_dev->device;
+ xlnx_match->bar_params = xdev->bar_info;
+ }
+
+ xlnx_match->board_number = xdev->board_number;
+ xlnx_match->channel_number = channel_number;
+ xlnx_match->direction = xdev->channels[channel_number].direction;
+
+ channel->common.private = (void *)xlnx_match;
+
+ channel->common.device = &xdev->common;
+ list_add_tail(&channel->common.device_node, &xdev->common.channels);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
+{
+ mempool_destroy(chan->transactions_pool);
+
+ mempool_destroy(chan->intr_transactions_pool);
+}
+
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+
+ if (chan->dstq_desc_cleanup)
+ destroy_workqueue(chan->dstq_desc_cleanup);
+
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+}
+
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
+{
+ kfree(chan->ppkt_ctx_srcq);
+
+ kfree(chan->ppkt_ctx_dstq);
+}
+
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
+{
+ ssize_t size;
+
+ if (chan->psrc_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+ }
+
+ if (chan->psrc_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+ }
+
+ if (chan->pdst_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
+ chan->dst_sta_bd_pa);
+ }
+}
+
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
+{
+ u32 reg = chan->coalesce_count;
+
+ reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
+
+ /* Enable Interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ reg | DMA_INTCNTRL_ENABLINTR_BIT |
+ DMA_INTCNTRL_DMAERRINTR_BIT |
+ DMA_INTCNTRL_DMASGINTR_BIT);
+
+ /* Enable DMA */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
+ DMA_CNTRL_ENABL_BIT |
+ DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_AVAILABLE;
+ spin_unlock(&chan->channel_lock);
+
+ /* Activate timer if required */
+ if ((chan->coalesce_count > 0) && !chan->poll_timer.function)
+ xlnx_ps_pcie_alloc_poll_timer(chan);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
+{
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ /* Delete timer if it is created */
+	if ((chan->coalesce_count > 0) && chan->poll_timer.function)
+ xlnx_ps_pcie_free_poll_timer(chan);
+
+ /* Flush descriptor cleaning work queues */
+ if (chan->primary_desc_cleanup)
+ flush_workqueue(chan->primary_desc_cleanup);
+
+ /* Flush channel programming work queue */
+ if (chan->chan_programming)
+ flush_workqueue(chan->chan_programming);
+
+ /* Clear the persistent bits */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT |
+ DMA_INTSTATUS_SGLINTR_BIT |
+ DMA_INTSTATUS_SWINTR_BIT);
+
+ /* Disable DMA channel */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_UNAVIALBLE;
+ spin_unlock(&chan->channel_lock);
+}
+
+static u32 total_bytes_in_sgl(struct scatterlist *sgl,
+ unsigned int num_entries)
+{
+ u32 total_bytes = 0;
+ struct scatterlist *sgl_ptr;
+ unsigned int i;
+
+ for_each_sg(sgl, sgl_ptr, num_entries, i)
+ total_bytes += sg_dma_len(sgl_ptr);
+
+ return total_bytes;
+}
+
+static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt;
+
+ rslt.result = result;
+ rslt.residue = 0;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
+}
+
+static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt, *prslt;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ rslt.result = result;
+ if (seg->tx_elements.src_sgl &&
+ chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue =
+ total_bytes_in_sgl(seg->tx_elements.src_sgl,
+ seg->tx_elements.srcq_num_elemets);
+ prslt = &rslt;
+ } else if (seg->tx_elements.dst_sgl &&
+ chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue =
+ total_bytes_in_sgl(seg->tx_elements.dst_sgl,
+ seg->tx_elements.dstq_num_elemets);
+ prslt = &rslt;
+ } else {
+ prslt = NULL;
+ }
+
+ dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
+}
+
+static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ if (ppkt_ctxt->availability_status == IN_USE) {
+ if (ppkt_ctxt->seg) {
+ ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
+ mempool_free(ppkt_ctxt->seg,
+ chan->transactions_pool);
+ }
+ }
+}
+
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
+{
+ int i;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
+ struct ps_pcie_tx_segment *seg, *seg_nxt;
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
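+	/*
+	 * Walk the source and destination packet context rings from tail
+	 * to head and complete every in-flight transaction with a failure
+	 * result, then abort whatever is still sitting on the active and
+	 * pending lists. Runs only after the channel has been quiesced.
+	 */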
+ if (chan->ppkt_ctx_srcq) {
+ if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
+ i = chan->idx_ctx_srcq_tail;
+ while (i != chan->idx_ctx_srcq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_srcq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_READ_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ if (chan->ppkt_ctx_dstq) {
+ if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
+ i = chan->idx_ctx_dstq_tail;
+ while (i != chan->idx_ctx_dstq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_dstq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_WRITE_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->pending_list_lock);
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->pending_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->pending_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+}
+
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
+{
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ ivk_cbk_for_pending(chan);
+
+ ps_pcie_chan_reset(chan);
+
+ init_sw_components(chan);
+ init_hw_components(chan);
+
+ xlnx_ps_pcie_channel_activate(chan);
+}
+
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->poll_timer.function) {
+ del_timer_sync(&chan->poll_timer);
+ chan->poll_timer.function = NULL;
+ }
+}
+
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ init_timer(&chan->poll_timer);
+ chan->poll_timer.function = poll_completed_transactions;
+ chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
+ chan->poll_timer.data = (unsigned long)chan;
+
+ add_timer(&chan->poll_timer);
+
+ return 0;
+}
+
+static void terminate_transactions_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_terminate);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+ ivk_cbk_for_pending(chan);
+ xlnx_ps_pcie_channel_activate(chan);
+
+ complete(&chan->chan_terminate_complete);
+}
+
+static void chan_shutdown_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_shutdown);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ complete(&chan->chan_shutdown_complt);
+}
+
+static void chan_reset_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_reset);
+
+ xlnx_ps_pcie_reset_channel(chan);
+}
+
+static void sw_intr_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_sw_intrs);
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
+ NULL);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ }
+}
+
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
+{
+ char wq_name[WORKQ_NAME_SIZE];
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d descriptor programming wq",
+ chan->channel_number);
+ chan->chan_programming =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->chan_programming) {
+ dev_err(chan->dev,
+ "Unable to create programming wq for chan %d",
+ chan->channel_number);
+ goto err_no_desc_program_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_programming,
+ ps_pcie_chan_program_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d primary cleanup wq", chan->channel_number);
+ chan->primary_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->primary_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create primary cleanup wq for channel %d",
+ chan->channel_number);
+ goto err_no_primary_clean_wq;
+ } else {
+ INIT_WORK(&chan->handle_primary_desc_cleanup,
+ ps_pcie_chan_primary_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d maintenance works wq",
+ chan->channel_number);
+ chan->maintenance_workq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->maintenance_workq) {
+ dev_err(chan->dev,
+ "Unable to create maintenance wq for channel %d",
+ chan->channel_number);
+ goto err_no_maintenance_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
+ INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
+ INIT_WORK(&chan->handle_chan_terminate,
+ terminate_transactions_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d software Interrupts wq",
+ chan->channel_number);
+ chan->sw_intrs_wrkq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->sw_intrs_wrkq) {
+ dev_err(chan->dev,
+ "Unable to create sw interrupts wq for channel %d",
+ chan->channel_number);
+ goto err_no_sw_intrs_wq;
+ } else {
+ INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ if (chan->psrc_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d srcq handling wq",
+ chan->channel_number);
+ chan->srcq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->srcq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create src q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_src_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_srcq_desc_cleanup,
+ src_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d dstq handling wq",
+ chan->channel_number);
+ chan->dstq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->dstq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create dst q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_dst_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_dstq_desc_cleanup,
+ dst_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ return 0;
+err_no_dst_q_completion_wq:
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+err_no_src_q_completion_wq:
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+err_no_sw_intrs_wq:
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+err_no_maintenance_wq:
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+err_no_primary_clean_wq:
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+err_no_desc_program_wq:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
+{
+ chan->transactions_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_tx_segment));
+
+ if (!chan->transactions_pool)
+ goto no_transactions_pool;
+
+ chan->intr_transactions_pool =
+ mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
+ sizeof(struct ps_pcie_intr_segment));
+
+ if (!chan->intr_transactions_pool)
+ goto no_intr_transactions_pool;
+
+ return 0;
+
+no_intr_transactions_pool:
+ mempool_destroy(chan->transactions_pool);
+
+no_transactions_pool:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd) {
+ chan->ppkt_ctx_srcq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_srcq) {
+ dev_err(chan->dev,
+				"Src pkt ctxt allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_src_pkt_ctx;
+ }
+ }
+
+ if (chan->pdst_sgl_bd) {
+ chan->ppkt_ctx_dstq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_dstq) {
+ dev_err(chan->dev,
+				"Dst pkt ctxt allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_dst_pkt_ctx;
+ }
+ }
+
+ return 0;
+
+err_no_dst_pkt_ctx:
+ kfree(chan->ppkt_ctx_srcq);
+
+err_no_src_pkt_ctx:
+ return -ENOMEM;
+}
+
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ void *sgl_base;
+ void *sta_base;
+ dma_addr_t phy_addr_sglbase;
+ dma_addr_t phy_addr_stabase;
+
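+	/*
+	 * In two queue mode the driver only manages the SGL and status
+	 * descriptor rings for the configured channel direction; the other
+	 * queue pair is expected to be handled by user logic, as described
+	 * in the device tree binding.
+	 */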
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+
+ sgl_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_sglbase,
+ GFP_KERNEL);
+
+ if (!sgl_base) {
+ dev_err(chan->dev,
+ "Sgl bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sgl_bds;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ sta_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_stabase,
+ GFP_KERNEL);
+
+ if (!sta_base) {
+ dev_err(chan->dev,
+ "Sta bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sta_bds;
+ }
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ chan->psrc_sgl_bd = sgl_base;
+ chan->src_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->psrc_sta_bd = sta_base;
+ chan->src_sta_bd_pa = phy_addr_stabase;
+
+ chan->pdst_sgl_bd = NULL;
+ chan->dst_sgl_bd_pa = 0;
+
+ chan->pdst_sta_bd = NULL;
+ chan->dst_sta_bd_pa = 0;
+
+ } else if (chan->direction == DMA_FROM_DEVICE) {
+ chan->psrc_sgl_bd = NULL;
+ chan->src_sgl_bd_pa = 0;
+
+ chan->psrc_sta_bd = NULL;
+ chan->src_sta_bd_pa = 0;
+
+ chan->pdst_sgl_bd = sgl_base;
+ chan->dst_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->pdst_sta_bd = sta_base;
+ chan->dst_sta_bd_pa = phy_addr_stabase;
+
+ } else {
+ dev_err(chan->dev,
+ "%d %s() Unsupported channel direction\n",
+ __LINE__, __func__);
+ goto unsupported_channel_direction;
+ }
+
+ return 0;
+
+unsupported_channel_direction:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
+err_no_sta_bds:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
+err_no_sgl_bds:
+
+ return -ENOMEM;
+}
+
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ chan->psrc_sgl_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail src q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
+ chan->pdst_sgl_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail dst q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ chan->psrc_sta_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate src q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sta_descriptors;
+ }
+
+ chan->pdst_sta_bd =
+ dma_zalloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate Dst q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sta_descriptors;
+ }
+
+ return 0;
+
+err_no_dst_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+err_no_src_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+err_no_dst_sgl_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+
+err_no_src_sgl_descriptors:
+ return -ENOMEM;
+}
+
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
+ return;
+
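+	/*
+	 * Quiesce the channel from the maintenance workqueue and wait for
+	 * it to complete before tearing down workqueues, pools, packet
+	 * contexts and descriptors.
+	 */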
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_shutdown_complt))
+ reinit_completion(&chan->chan_shutdown_complt);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_shutdown);
+ wait_for_completion_interruptible(&chan->chan_shutdown_complt);
+
+ xlnx_ps_pcie_free_worker_queues(chan);
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+ xlnx_ps_pcie_destroy_mempool(chan);
+ xlnx_ps_pcie_free_descriptors(chan);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_RESOURCE_UNALLOCATED;
+ spin_unlock(&chan->channel_lock);
+ }
+}
+
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+	if (!dchan)
+		return -EINVAL;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
+ return 0;
+
+ if (chan->num_queues == DEFAULT_DMA_QUEUES) {
+ if (dma_alloc_decriptors_all_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ } else if (chan->num_queues == TWO_DMA_QUEUES) {
+ if (dma_alloc_descriptors_two_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for two queues of channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ }
+
+ if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate memory pool for channel %d\n",
+ chan->channel_number);
+ goto err_no_mempools;
+ }
+
+ if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate packet contexts for channel %d\n",
+ chan->channel_number);
+ goto err_no_pkt_ctxts;
+ }
+
+ if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate worker queues for channel %d\n",
+ chan->channel_number);
+ goto err_no_worker_queues;
+ }
+
+ xlnx_ps_pcie_reset_channel(chan);
+
+ dma_cookie_init(dchan);
+
+ return 0;
+
+err_no_worker_queues:
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+err_no_pkt_ctxts:
+ xlnx_ps_pcie_destroy_mempool(chan);
+err_no_mempools:
+ xlnx_ps_pcie_free_descriptors(chan);
+err_no_descriptors:
+ return -ENOMEM;
+}
+
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_intr_segment *intr_seg =
+ to_ps_pcie_dma_tx_intr_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_interrupts_lock);
+ list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
+ spin_unlock(&chan->pending_interrupts_lock);
+
+ return cookie;
+}
+
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_list_lock);
+ list_add_tail(&seg->node, &chan->pending_list);
+ spin_unlock(&chan->pending_list_lock);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
+ struct dma_chan *channel, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (dst_nents == 0 || src_nents == 0)
+ return NULL;
+
+ if (!dst_sg || !src_sg)
+ return NULL;
+
+ if (chan->num_queues != DEFAULT_DMA_QUEUES) {
+		dev_err(chan->dev,
+			"Only prep_slave_sg is supported for channel %d\n",
+			chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+		dev_err(chan->dev, "Tx segment alloc failed for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->tx_elements.dst_sgl = dst_sg;
+ seg->tx_elements.dstq_num_elemets = dst_nents;
+ seg->tx_elements.src_sgl = src_sg;
+ seg->tx_elements.srcq_num_elemets = src_nents;
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (!(is_slave_direction(direction)))
+ return NULL;
+
+ if (!sgl || sg_len == 0)
+ return NULL;
+
+ if (chan->num_queues != TWO_DMA_QUEUES) {
+		dev_err(chan->dev,
+			"Only prep_dma_sg is supported for channel %d\n",
+			chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+		dev_err(chan->dev,
+			"Unable to allocate tx segment for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ seg->tx_elements.src_sgl = sgl;
+ seg->tx_elements.srcq_num_elemets = sg_len;
+ seg->tx_elements.dst_sgl = NULL;
+ seg->tx_elements.dstq_num_elemets = 0;
+ } else {
+ seg->tx_elements.src_sgl = NULL;
+ seg->tx_elements.srcq_num_elemets = 0;
+ seg->tx_elements.dst_sgl = sgl;
+ seg->tx_elements.dstq_num_elemets = sg_len;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+}
+
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return;
+
+ chan = to_xilinx_chan(channel);
+
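+	/*
+	 * Move all submitted transactions from the pending lists to the
+	 * active lists and kick the channel programming work to start
+	 * servicing them.
+	 */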
+ if (!list_empty(&chan->pending_list)) {
+ spin_lock(&chan->pending_list_lock);
+ spin_lock(&chan->active_list_lock);
+ list_splice_tail_init(&chan->pending_list,
+ &chan->active_list);
+ spin_unlock(&chan->active_list_lock);
+ spin_unlock(&chan->pending_list_lock);
+ }
+
+ if (!list_empty(&chan->pending_interrupts_list)) {
+ spin_lock(&chan->pending_interrupts_lock);
+ spin_lock(&chan->active_interrupts_lock);
+ list_splice_tail_init(&chan->pending_interrupts_list,
+ &chan->active_interrupts_list);
+ spin_unlock(&chan->active_interrupts_lock);
+ spin_unlock(&chan->pending_interrupts_lock);
+ }
+
+ if (chan->chan_programming)
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+}
+
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+	if (!channel)
+		return -EINVAL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return 1;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_terminate_complete))
+ reinit_completion(&chan->chan_terminate_complete);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_terminate);
+ wait_for_completion_interruptible(
+ &chan->chan_terminate_complete);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan;
+ struct ps_pcie_intr_segment *intr_segment = NULL;
+
+ if (!channel)
+ return NULL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+	intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
+	if (!intr_segment) {
+		dev_err(chan->dev,
+			"Interrupt segment alloc failed for channel %d\n",
+			chan->channel_number);
+		return NULL;
+	}
+
+	memset(intr_segment, 0, sizeof(*intr_segment));
+
+ dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
+ &chan->common);
+ intr_segment->async_intr_tx.flags = flags;
+ async_tx_ack(&intr_segment->async_intr_tx);
+ intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
+
+ return &intr_segment->async_intr_tx;
+}
+
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
+{
+ int err, i;
+ struct xlnx_pcie_dma_device *xdev;
+ static u16 board_number;
+
+ xdev = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
+
+ if (!xdev)
+ return -ENOMEM;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ xdev->dma_buf_ext_addr = true;
+#else
+ xdev->dma_buf_ext_addr = false;
+#endif
+
+ xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
+ "rootdma");
+
+ xdev->dev = &platform_dev->dev;
+ xdev->board_number = board_number;
+
+ err = device_property_read_u32(&platform_dev->dev, "numchannels",
+ &xdev->num_channels);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find numchannels property\n");
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->num_channels == 0 || xdev->num_channels >
+ MAX_ALLOWED_CHANNELS_IN_HW) {
+ dev_warn(&platform_dev->dev,
+			 "Invalid numchannels property value\n");
+ xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
+ }
+
+	xdev->channels = devm_kcalloc(&platform_dev->dev, xdev->num_channels,
+				      sizeof(struct ps_pcie_dma_chan),
+				      GFP_KERNEL);
+ if (!xdev->channels) {
+ err = -ENOMEM;
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->is_rootdma)
+ err = read_rootdma_config(platform_dev, xdev);
+ else
+ err = read_epdma_config(platform_dev, xdev);
+
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to initialize dma configuration\n");
+ goto platform_driver_probe_return;
+ }
+
+ /* Initialize the DMA engine */
+ INIT_LIST_HEAD(&xdev->common.channels);
+
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ dma_cap_set(DMA_SG, xdev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
+
+ xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ xdev->common.device_alloc_chan_resources =
+ xlnx_ps_pcie_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xlnx_ps_pcie_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
+ xdev->common.device_tx_status = dma_cookie_status;
+ xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
+ xdev->common.device_prep_dma_interrupt =
+ xlnx_ps_pcie_dma_prep_interrupt;
+ xdev->common.device_prep_dma_sg = xlnx_ps_pcie_dma_prep_dma_sg;
+ xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
+ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ err = probe_channel_properties(platform_dev, xdev, i);
+
+ if (err != 0) {
+ dev_err(xdev->dev,
+ "Unable to read channel properties\n");
+ goto platform_driver_probe_return;
+ }
+ }
+
+ if (xdev->is_rootdma)
+ err = platform_irq_setup(xdev);
+ else
+ err = irq_setup(xdev);
+ if (err) {
+ dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev,
+ "Unable to register board %d with dma framework\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ platform_set_drvdata(platform_dev, xdev);
+
+ board_number++;
+
+ dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
+ return 0;
+
+platform_driver_probe_return:
+ return err;
+}
+
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ platform_get_drvdata(platform_dev);
+ int i;
+
+ for (i = 0; i < xdev->num_channels; i++)
+ xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
+
+ dma_async_device_unregister(&xdev->common);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
+ { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
+#endif
+
+static struct platform_driver xlnx_pcie_dma_driver = {
+ .driver = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
+ .owner = THIS_MODULE,
+ },
+ .probe = xlnx_pcie_dma_driver_probe,
+ .remove = xlnx_pcie_dma_driver_remove,
+};
+
+int dma_platform_driver_register(void)
+{
+ return platform_driver_register(&xlnx_pcie_dma_driver);
+}
+
+void dma_platform_driver_unregister(void)
+{
+ platform_driver_unregister(&xlnx_pcie_dma_driver);
+}
--
2.7.4
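For reference, the snippet below is a minimal, illustrative sketch of how a DMA client could claim one of these channels through dma_request_channel(), matching on the ps_pcie_dma_channel_match data the driver publishes in chan->private; the names ps_pcie_chan_filter() and ps_pcie_get_tx_chan() are hypothetical and not part of this series:

#include <linux/dmaengine.h>
#include <linux/pci_ids.h>
#include <linux/dma/ps_pcie_dma.h>

static bool ps_pcie_chan_filter(struct dma_chan *chan, void *param)
{
	struct ps_pcie_dma_channel_match *want = param;
	struct ps_pcie_dma_channel_match *have = chan->private;

	return have &&
	       have->pci_vendorid == want->pci_vendorid &&
	       have->pci_deviceid == want->pci_deviceid &&
	       have->channel_number == want->channel_number &&
	       have->direction == want->direction;
}

static struct dma_chan *ps_pcie_get_tx_chan(void)
{
	struct ps_pcie_dma_channel_match match = {
		.pci_vendorid = PCI_VENDOR_ID_XILINX,
		.pci_deviceid = ZYNQMP_RC_DMA_DEVID,
		.channel_number = 0,
		.direction = DMA_TO_DEVICE,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Returns NULL when no matching channel is available */
	return dma_request_channel(mask, ps_pcie_chan_filter, &match);
}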
Enabling Root DMA interrupts
Adding Root DMA translations to bridge for Register Access
Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
Signed-off-by: RaviKiran Gummaluri <[email protected]>
---
drivers/pci/host/pcie-xilinx-nwl.c | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index eec641a..5766582 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -39,6 +39,11 @@
#define E_ECAM_CONTROL 0x00000228
#define E_ECAM_BASE_LO 0x00000230
#define E_ECAM_BASE_HI 0x00000234
+#define E_DREG_CTRL 0x00000288
+#define E_DREG_BASE_LO 0x00000290
+
+#define DREG_DMA_EN BIT(0)
+#define DREG_DMA_BASE_LO 0xFD0F0000
/* Ingress - address translations */
#define I_MSII_CAPABILITIES 0x00000300
@@ -57,6 +62,10 @@
#define MSGF_MSI_STATUS_HI 0x00000444
#define MSGF_MSI_MASK_LO 0x00000448
#define MSGF_MSI_MASK_HI 0x0000044C
+/* Root DMA Interrupt register */
+#define MSGF_DMA_MASK 0x00000464
+
+#define MSGF_INTR_EN BIT(0)
/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD BIT(1)
@@ -766,6 +775,12 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+ /* Enabling DREG translations */
+ nwl_bridge_writel(pcie, DREG_DMA_EN, E_DREG_CTRL);
+ nwl_bridge_writel(pcie, DREG_DMA_BASE_LO, E_DREG_BASE_LO);
+ /* Enabling Root DMA interrupts */
+ nwl_bridge_writel(pcie, MSGF_INTR_EN, MSGF_DMA_MASK);
+
/* Enable all legacy interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
--
2.7.4
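With the DREG translation enabled above, the Root DMA register block becomes visible to the PS at DREG_DMA_BASE_LO (0xFD0F0000), which is the address the Root DMA platform node's reg property points at. A hedged sketch of how the platform driver side could map that window, using the resource name from the binding; the helper itself is illustrative, not code from this series:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Illustrative only: map the Root DMA register window exposed above. */
static void __iomem *map_rootdma_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "xlnx,ps_pcie_regbase");

	return devm_ioremap_resource(&pdev->dev, res);
}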
Hi Ravi,
[auto build test ERROR on linus/master]
[also build test ERROR on v4.13 next-20170908]
[cannot apply to xlnx/master]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Ravi-Shankar-Jonnalagadda/dmaengine-ZynqMP-PS-PCIe-DMA-driver/20170911-052150
config: arm64-allmodconfig (attached as .config)
compiler: aarch64-linux-gnu-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=arm64
Note: the linux-review/Ravi-Shankar-Jonnalagadda/dmaengine-ZynqMP-PS-PCIe-DMA-driver/20170911-052150 HEAD 442a00e11c4fa28ed29541773946aa1cda153e7e builds fine.
It only hurts bisectibility.
All errors (new ones prefixed by >>):
>> make[4]: *** No rule to make target 'drivers/dma/xilinx/ps_pcie_platform.o', needed by 'drivers/dma/xilinx/ps_pcie_dma.o'.
make[4]: Target '__build' not remade because of errors.
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
On Fri, Sep 08, 2017 at 05:53:08PM +0530, Ravi Shankar Jonnalagadda wrote:
> Binding explaining devicetree usage for enabling Root DMA capability
>
> Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
> Signed-off-by: RaviKiran Gummaluri <[email protected]>
> ---
> .../devicetree/bindings/dma/xilinx/ps-pcie-dma.txt | 67 ++++++++++++++++++++++
> 1 file changed, 67 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
>
> diff --git a/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
> new file mode 100644
> index 0000000..1522a49
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
> @@ -0,0 +1,67 @@
> +* Xilinx PS PCIe Root DMA
> +
> +Required properties:
> +- compatible: Should be "xlnx,ps_pcie_dma-1.00.a"
> +- reg: Register offset for Root DMA channels
> +- reg-names: Name for the register. Should be "xlnx,ps_pcie_regbase"
*-names for a single entry is pointless.
> +- interrupts: Interrupt pin for Root DMA
> +- interrupt-names: Name for the interrupt. Should be "xlnx,ps_pcie_rootdma_intr"
ditto
> +- interrupt-parent: Should be gic in case of zynqmp
> +- xlnx,rootdma: Indicates this platform device is root dma.
> + This is required as the same platform driver will be invoked by pcie end points too
platform device and driver are Linux terms.
> +- xlnx,dma_vendorid: 16 bit PCIe device vendor id.
> + This can be later used by dma client for matching while using dma_request_channel
> +- xlnx,dma_deviceid: 16 bit PCIe device id
> + This can be later used by dma client for matching while using dma_request_channel
Are these the IDs of the client? Sounds like you should use the DMA
binding.
> +- xlnx,numchannels: Indicates number of channels to be enabled for the device.
> + Valid values are from 1 to 4 for zynqmp
DMA binding has a similar property.
> +- xlnx,ps_pcie_channel : One for each channel to be enabled.
s/_/-/
> + This array contains channel specific properties.
> + Index 0: Direction of channel
> + Direction of channel can be either PCIe Memory to AXI memory i.e., Host to Card or
> + AXI Memory to PCIe memory i.e., Card to Host
> + PCIe to AXI Channel Direction is represented as 0x1
> + AXI to PCIe Channel Direction is represented as 0x0
> + Index 1: Number of Buffer Descriptors
> + This number describes number of buffer descriptors to be allocated for a channel
> + Index 2: Number of Queues
> + Each Channel has four DMA Buffer Descriptor Queues.
> + By default All four Queues will be managed by Root DMA driver.
> + User may choose to have only two queues either Source and it's Status Queue or
> + Destination and it's Status Queue to be handled by Driver.
> + The other two queues need to be handled by user logic which will not be part of this driver.
> + All Queues on Host is represented by 0x4
> + Two Queues on Host is represented by 0x2
> + Index 3: Coaelse Count
> + This number indicates the number of transfers after which interrupt needs to
> + be raised for the particular channel. The allowed range is from 0 to 255
> + Index 4: Coaelse Count Timer frequency
> + This property is used to control the frequency of poll timer. Poll timer is
> + created for a channel whenever coalesce count value (>= 1) is programmed for the particular
> + channel. This timer is helpful in draining out completed transactions even though interrupt is
> + not generated.
> +
> +Client Usage:
> + DMA clients can request for these channels using dma_request_channel API
> +
> +
> +Xilinx PS PCIe Root DMA node Example
> +++++++++++++++++++++++++++++++++++++
> +
> + pci_rootdma: rootdma@fd0f0000 {
dma-controller@...
> + compatible = "xlnx,ps_pcie_dma-1.00.a";
> + reg = <0x0 0xfd0f0000 0x0 0x1000>;
> + reg-names = "xlnx,ps_pcie_regbase";
> + interrupts = <0 117 4>;
> + interrupt-names = "xlnx,ps_pcie_rootdma_intr";
> + interrupt-parent = <&gic>;
> + xlnx,rootdma;
> + xlnx,dma_vendorid = /bits/ 16 <0x10EE>;
> + xlnx,dma_deviceid = /bits/ 16 <0xD021>;
> + xlnx,numchannels = <0x4>;
> + #size-cells = <0x5>;
> + xlnx,ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8>;
> + xlnx,ps_pcie_channel1 = <0x0 0x7CF 0x4 0x0 0x3E8>;
> + xlnx,ps_pcie_channel2 = <0x1 0x7CF 0x4 0x0 0x3E8>;
> + xlnx,ps_pcie_channel3 = <0x0 0x7CF 0x4 0x0 0x3E8>;
> + };
> --
> 2.7.4
>
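For what it's worth, a rough sketch of how the platform driver could unpack one of these five-cell xlnx,ps_pcie_channel<n> arrays with device_property_read_u32_array() (cell order: direction, buffer descriptors, queues, coalesce count, poll timer period); the helper name, the use of the driver's internal struct ps_pcie_dma_chan fields, and the millisecond-to-jiffies conversion are assumptions rather than code taken from the series:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/dma/ps_pcie_dma.h>

/* Hypothetical parse of one per-channel property array (5 u32 cells). */
static int parse_channel_props(struct device *dev, int ch,
			       struct ps_pcie_dma_chan *chan)
{
	char prop[32];
	u32 vals[5];
	int ret;

	snprintf(prop, sizeof(prop), "xlnx,ps_pcie_channel%d", ch);
	ret = device_property_read_u32_array(dev, prop, vals,
					     ARRAY_SIZE(vals));
	if (ret)
		return ret;

	/* Binding: 0x1 = PCIe to AXI (host to card), 0x0 = AXI to PCIe */
	chan->direction = vals[0] ? PCIE_AXI_DIRECTION : AXI_PCIE_DIRECTION;
	chan->total_descriptors = vals[1];
	chan->num_queues = vals[2];
	chan->coalesce_count = vals[3];
	chan->poll_timer_freq = msecs_to_jiffies(vals[4]);

	return 0;
}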
Hi Ravi,
Please make the subject line follow the existing convention, i.e., run
$ git log --oneline drivers/pci/host/pcie-xilinx-nwl.c
and make yours match spacing, capitalization, and style (imperative
sentence).
Also waiting for ack from Michal.
On Fri, Sep 08, 2017 at 05:53:03PM +0530, Ravi Shankar Jonnalagadda wrote:
> Enabling Root DMA interrupts
>
> Adding Root DMA translations to bridge for Register Access
>
> Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
> Signed-off-by: RaviKiran Gummaluri <[email protected]>
> ---
> drivers/pci/host/pcie-xilinx-nwl.c | 15 +++++++++++++++
> 1 file changed, 15 insertions(+)
>
> diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
> index eec641a..5766582 100644
> --- a/drivers/pci/host/pcie-xilinx-nwl.c
> +++ b/drivers/pci/host/pcie-xilinx-nwl.c
> @@ -39,6 +39,11 @@
> #define E_ECAM_CONTROL 0x00000228
> #define E_ECAM_BASE_LO 0x00000230
> #define E_ECAM_BASE_HI 0x00000234
> +#define E_DREG_CTRL 0x00000288
> +#define E_DREG_BASE_LO 0x00000290
> +
> +#define DREG_DMA_EN BIT(0)
> +#define DREG_DMA_BASE_LO 0xFD0F0000
>
> /* Ingress - address translations */
> #define I_MSII_CAPABILITIES 0x00000300
> @@ -57,6 +62,10 @@
> #define MSGF_MSI_STATUS_HI 0x00000444
> #define MSGF_MSI_MASK_LO 0x00000448
> #define MSGF_MSI_MASK_HI 0x0000044C
> +/* Root DMA Interrupt register */
> +#define MSGF_DMA_MASK 0x00000464
> +
> +#define MSGF_INTR_EN BIT(0)
>
> /* Msg filter mask bits */
> #define CFG_ENABLE_PM_MSG_FWD BIT(1)
> @@ -766,6 +775,12 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
> nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
> MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
>
> + /* Enabling DREG translations */
> + nwl_bridge_writel(pcie, DREG_DMA_EN, E_DREG_CTRL);
> + nwl_bridge_writel(pcie, DREG_DMA_BASE_LO, E_DREG_BASE_LO);
> + /* Enabling Root DMA interrupts */
> + nwl_bridge_writel(pcie, MSGF_INTR_EN, MSGF_DMA_MASK);
> +
> /* Enable all legacy interrupts */
> nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
>
> --
> 2.7.4
>
On 8.9.2017 14:23, Ravi Shankar Jonnalagadda wrote:
> Adding support for ZynqmMP PS PCIe EP driver.
> Adding support for ZynqmMP PS PCIe Root DMA driver.
> Modifying Kconfig and Makefile to add the support.
>
> Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
> Signed-off-by: RaviKiran Gummaluri <[email protected]>
> ---
> drivers/dma/Kconfig | 12 +++
> drivers/dma/xilinx/Makefile | 2 +
> drivers/dma/xilinx/ps_pcie.h | 44 +++++++++
> drivers/dma/xilinx/ps_pcie_main.c | 200 ++++++++++++++++++++++++++++++++++++++
> include/linux/dma/ps_pcie_dma.h | 69 +++++++++++++
> 5 files changed, 327 insertions(+)
> create mode 100644 drivers/dma/xilinx/ps_pcie.h
> create mode 100644 drivers/dma/xilinx/ps_pcie_main.c
> create mode 100644 include/linux/dma/ps_pcie_dma.h
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index fa8f9c0..e2fe4e5 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -586,6 +586,18 @@ config XILINX_ZYNQMP_DMA
> help
> Enable support for Xilinx ZynqMP DMA controller.
>
> +config XILINX_PS_PCIE_DMA
> + tristate "Xilinx PS PCIe DMA support"
> + depends on (PCI && X86_64 || ARM64)
> + select DMA_ENGINE
> + help
> + Enable support for the Xilinx PS PCIe DMA engine present
> + in recent Xilinx ZynqMP chipsets.
> +
> + Say Y here if you have such a chipset.
> +
> + If unsure, say N.
> +
> config ZX_DMA
> tristate "ZTE ZX DMA support"
> depends on ARCH_ZX || COMPILE_TEST
> diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
> index 9e91f8f..04f6f99 100644
> --- a/drivers/dma/xilinx/Makefile
> +++ b/drivers/dma/xilinx/Makefile
> @@ -1,2 +1,4 @@
> obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
> obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
> +ps_pcie_dma-objs := ps_pcie_main.o ps_pcie_platform.o
> +obj-$(CONFIG_XILINX_PS_PCIE_DMA) += ps_pcie_dma.o
> diff --git a/drivers/dma/xilinx/ps_pcie.h b/drivers/dma/xilinx/ps_pcie.h
> new file mode 100644
> index 0000000..351f051
> --- /dev/null
> +++ b/drivers/dma/xilinx/ps_pcie.h
> @@ -0,0 +1,44 @@
> +/*
> + * Xilinx PS PCIe DMA Engine platform header file
> + *
> + * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#ifndef __XILINX_PS_PCIE_H
> +#define __XILINX_PS_PCIE_H
> +
> +#include <linux/delay.h>
> +#include <linux/dma-direction.h>
this is included via dma-mapping.h below.
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/interrupt.h>
> +#include <linux/ioport.h>
> +#include <linux/irqreturn.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/mempool.h>
> +#include <linux/of.h>
> +#include <linux/pci.h>
> +#include <linux/property.h>
This is already included via of.h.
> +#include <linux/platform_device.h>
> +#include <linux/timer.h>
> +#include <linux/dma/ps_pcie_dma.h>
Don't we have any script for checking this?
> +
> +/**
> + * dma_platform_driver_register - This will be invoked by module init
> + *
> + * Return: returns status of platform_driver_register
> + */
> +int dma_platform_driver_register(void);
put empty line here.
> +/**
> + * dma_platform_driver_unregister - This will be invoked by module exit
> + *
> + * Return: returns void after unregustering platform driver
> + */
> +void dma_platform_driver_unregister(void);
> +
> +#endif
> diff --git a/drivers/dma/xilinx/ps_pcie_main.c b/drivers/dma/xilinx/ps_pcie_main.c
> new file mode 100644
> index 0000000..4ccd8ef
> --- /dev/null
> +++ b/drivers/dma/xilinx/ps_pcie_main.c
> @@ -0,0 +1,200 @@
> +/*
> + * XILINX PS PCIe driver
> + *
> + * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
> + *
> + * Description
> + * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
> + * on ZynqMP UltraScale+ Devices.
> + * This PCIe driver creates a platform device with specific platform
> + * info enabling creation of DMA device corresponding to the channel
> + * information provided in the properties
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#include "ps_pcie.h"
> +#include "../dmaengine.h"
> +
> +#define DRV_MODULE_NAME "ps_pcie_dma"
> +
> +static int ps_pcie_dma_probe(struct pci_dev *pdev,
> + const struct pci_device_id *ent);
> +static void ps_pcie_dma_remove(struct pci_dev *pdev);
> +
> +static u32 channel_properties_pcie_axi[] = {
> + (u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
> + (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
> + (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
> +
> +static u32 channel_properties_axi_pcie[] = {
> + (u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
> + (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
> + (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
> +
> +static struct property_entry generic_pcie_ep_property[] = {
> + PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
> + channel_properties_pcie_axi),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
> + channel_properties_axi_pcie),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
> + channel_properties_pcie_axi),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
> + channel_properties_axi_pcie),
> + { },
> +};
> +
> +static const struct platform_device_info xlnx_std_platform_dev_info = {
> + .name = XLNX_PLATFORM_DRIVER_NAME,
> + .properties = generic_pcie_ep_property,
> +};
> +
> +/**
> + * ps_pcie_dma_probe - Driver probe function
> + * @pdev: Pointer to the pci_dev structure
> + * @ent: pci device id
> + *
> + * Return: '0' on success and failure value on error
> + */
> +static int ps_pcie_dma_probe(struct pci_dev *pdev,
> + const struct pci_device_id *ent)
> +{
> + int err;
> + struct platform_device *platform_dev;
> + struct platform_device_info platform_dev_info;
> +
> + dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
> +
> + err = pcim_enable_device(pdev);
> + if (err) {
> + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
> + return err;
> + }
> +
> + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
> + if (err) {
> + dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
> + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
> + if (err) {
> + dev_err(&pdev->dev, "DMA mask set error\n");
> + return err;
> + }
> + }
> +
> + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
> + if (err) {
> + dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
> + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
> + if (err) {
> + dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
> + return err;
> + }
> + }
> +
> + pci_set_master(pdev);
> +
> + /* For Root DMA platform device will be created through device tree */
> + if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
> + pdev->device == ZYNQMP_RC_DMA_DEVID)
> + return 0;
> +
> + memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
> + sizeof(xlnx_std_platform_dev_info));
> +
> + /* Do device specific channel configuration changes to
> + * platform_dev_info.properties if required
> + * More information on channel properties can be found
> + * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
> + */
> +
> + platform_dev_info.parent = &pdev->dev;
> + platform_dev_info.data = &pdev;
> + platform_dev_info.size_data = sizeof(struct pci_dev **);
> +
> + platform_dev = platform_device_register_full(&platform_dev_info);
> + if (IS_ERR(platform_dev)) {
> + dev_err(&pdev->dev,
> + "Cannot create platform device, aborting\n");
> + return PTR_ERR(platform_dev);
> + }
> +
> + pci_set_drvdata(pdev, platform_dev);
> +
> + dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
> +
> + return 0;
> +}
> +
> +static struct pci_device_id ps_pcie_dma_tbl[] = {
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
> + { }
> +};
> +
> +static struct pci_driver ps_pcie_dma_driver = {
> + .name = DRV_MODULE_NAME,
> + .id_table = ps_pcie_dma_tbl,
> + .probe = ps_pcie_dma_probe,
> + .remove = ps_pcie_dma_remove,
> +};
> +
> +/**
> + * ps_pcie_init - Driver init function
> + *
> + * Return: 0 on success. Non zero on failure
> + */
> +static int __init ps_pcie_init(void)
> +{
> + int ret;
> +
> + pr_info("%s init()\n", DRV_MODULE_NAME);
> +
> + ret = pci_register_driver(&ps_pcie_dma_driver);
> + if (ret)
> + return ret;
> +
> + ret = dma_platform_driver_register();
> + if (ret)
> + pci_unregister_driver(&ps_pcie_dma_driver);
> +
> + return ret;
> +}
> +
> +/**
> + * ps_pcie_dma_remove - Driver remove function
> + * @pdev: Pointer to the pci_dev structure
> + *
> + * Return: void
> + */
> +static void ps_pcie_dma_remove(struct pci_dev *pdev)
> +{
> + struct platform_device *platform_dev;
> +
> + platform_dev = (struct platform_device *)pci_get_drvdata(pdev);
> +
> + if (platform_dev)
> + platform_device_unregister(platform_dev);
> +}
> +
> +/**
> + * ps_pcie_exit - Driver exit function
> + *
> + * Return: void
> + */
> +static void __exit ps_pcie_exit(void)
> +{
> + pr_info("%s exit()\n", DRV_MODULE_NAME);
> +
> + dma_platform_driver_unregister();
> + pci_unregister_driver(&ps_pcie_dma_driver);
> +}
> +
> +module_init(ps_pcie_init);
> +module_exit(ps_pcie_exit);
> +
> +MODULE_AUTHOR("Xilinx Inc");
> +MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/linux/dma/ps_pcie_dma.h b/include/linux/dma/ps_pcie_dma.h
> new file mode 100644
> index 0000000..d11323a
> --- /dev/null
> +++ b/include/linux/dma/ps_pcie_dma.h
> @@ -0,0 +1,69 @@
> +/*
> + * Xilinx PS PCIe DMA Engine support header file
> + *
> + * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#ifndef __DMA_XILINX_PS_PCIE_H
> +#define __DMA_XILINX_PS_PCIE_H
> +
> +#include <linux/dma-mapping.h>
> +#include <linux/dmaengine.h>
> +
> +#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
> +
> +#define ZYNQMP_DMA_DEVID (0xD024)
> +#define ZYNQMP_RC_DMA_DEVID (0xD021)
Are these hardcoded? If yes, maybe we have better location where these
should be put.
> +
> +#define MAX_ALLOWED_CHANNELS_IN_HW 4
> +
> +#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
> +
> +#define DEFAULT_DMA_QUEUES 4
> +#define TWO_DMA_QUEUES 2
> +
> +#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
> +#define MAX_DESCRIPTORS 65536
> +
> +#define CHANNEL_COAELSE_COUNT 0
> +
> +#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milli seconds */
> +
> +#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
> +#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
> +
> +/**
> + * struct BAR_PARAMS - PCIe Bar Parameters
> + * @BAR_PHYS_ADDR: PCIe BAR Physical address
> + * @BAR_LENGTH: Length of PCIe BAR
> + * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
> + */
> +struct BAR_PARAMS {
> + dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
> + unsigned long BAR_LENGTH; /**< Length of BAR memory window */
> + void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
> +};
> +
> +/**
> + * struct ps_pcie_dma_channel_match - Match structure for dma clients
> + * @pci_vendorid: PCIe Vendor id of PS PCIe DMA device
> + * @pci_deviceid: PCIe Device id of PS PCIe DMA device
> + * @board_number: Unique id to identify individual device in a system
> + * @channel_number: Unique channel number of the device
> + * @direction: DMA channel direction
> + * @bar_params: Pointer to BAR_PARAMS for accessing application specific data
> + */
> +struct ps_pcie_dma_channel_match {
> + u16 pci_vendorid;
> + u16 pci_deviceid;
> + u16 board_number;
> + u16 channel_number;
> + enum dma_data_direction direction;
> + struct BAR_PARAMS *bar_params;
> +};
> +
> +#endif
>
Thanks,
Michal
--
Michal Simek, Ing. (M.Eng), OpenPGP -> KeyID: FE3D1F91
w: http://www.monstr.eu p: +42-0-721842854
Maintainer of Linux kernel - Xilinx Microblaze
Maintainer of Linux kernel - Xilinx Zynq ARM and ZynqMP ARM64 SoCs
U-Boot custodian - Xilinx Microblaze/Zynq/ZynqMP SoCs
On Fri, Sep 08, 2017 at 05:53:05PM +0530, Ravi Shankar Jonnalagadda wrote:
> Adding support for ZynqmMP PS PCIe EP driver.
> Adding support for ZynqmMP PS PCIe Root DMA driver.
/s/Adding/Add/
Please describe the dmaengines here so people can know what to expect.
> Modifying Kconfig and Makefile to add the support.
You can remove this
>
> Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
> Signed-off-by: RaviKiran Gummaluri <[email protected]>
> ---
> drivers/dma/Kconfig | 12 +++
> drivers/dma/xilinx/Makefile | 2 +
> drivers/dma/xilinx/ps_pcie.h | 44 +++++++++
> drivers/dma/xilinx/ps_pcie_main.c | 200 ++++++++++++++++++++++++++++++++++++++
> include/linux/dma/ps_pcie_dma.h | 69 +++++++++++++
> 5 files changed, 327 insertions(+)
> create mode 100644 drivers/dma/xilinx/ps_pcie.h
> create mode 100644 drivers/dma/xilinx/ps_pcie_main.c
> create mode 100644 include/linux/dma/ps_pcie_dma.h
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index fa8f9c0..e2fe4e5 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -586,6 +586,18 @@ config XILINX_ZYNQMP_DMA
> help
> Enable support for Xilinx ZynqMP DMA controller.
>
> +config XILINX_PS_PCIE_DMA
> + tristate "Xilinx PS PCIe DMA support"
> + depends on (PCI && X86_64 || ARM64)
> + select DMA_ENGINE
> + help
> + Enable support for the Xilinx PS PCIe DMA engine present
> + in recent Xilinx ZynqMP chipsets.
> +
> + Say Y here if you have such a chipset.
> +
> + If unsure, say N.
Can you remove the last two lines, they don't convey anything useful
> +
> config ZX_DMA
> tristate "ZTE ZX DMA support"
> depends on ARCH_ZX || COMPILE_TEST
> diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
> index 9e91f8f..04f6f99 100644
> --- a/drivers/dma/xilinx/Makefile
> +++ b/drivers/dma/xilinx/Makefile
> @@ -1,2 +1,4 @@
> obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
> obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
> +ps_pcie_dma-objs := ps_pcie_main.o ps_pcie_platform.o
> +obj-$(CONFIG_XILINX_PS_PCIE_DMA) += ps_pcie_dma.o
> diff --git a/drivers/dma/xilinx/ps_pcie.h b/drivers/dma/xilinx/ps_pcie.h
> new file mode 100644
> index 0000000..351f051
> --- /dev/null
> +++ b/drivers/dma/xilinx/ps_pcie.h
> @@ -0,0 +1,44 @@
> +/*
> + * Xilinx PS PCIe DMA Engine platform header file
> + *
> + * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#ifndef __XILINX_PS_PCIE_H
> +#define __XILINX_PS_PCIE_H
> +
> +#include <linux/delay.h>
> +#include <linux/dma-direction.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/interrupt.h>
> +#include <linux/ioport.h>
> +#include <linux/irqreturn.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/mempool.h>
> +#include <linux/of.h>
> +#include <linux/pci.h>
> +#include <linux/property.h>
> +#include <linux/platform_device.h>
> +#include <linux/timer.h>
> +#include <linux/dma/ps_pcie_dma.h>
Do you really need all these headers?
> +
> +/**
> + * dma_platform_driver_register - This will be invoked by module init
> + *
> + * Return: returns status of platform_driver_register
> + */
> +int dma_platform_driver_register(void);
> +/**
> + * dma_platform_driver_unregister - This will be invoked by module exit
> + *
> + * Return: returns void after unregustering platform driver
typo, please run spell checker & checkpatch on your patches
> + */
> +void dma_platform_driver_unregister(void);
> +
> +#endif
> diff --git a/drivers/dma/xilinx/ps_pcie_main.c b/drivers/dma/xilinx/ps_pcie_main.c
> new file mode 100644
> index 0000000..4ccd8ef
> --- /dev/null
> +++ b/drivers/dma/xilinx/ps_pcie_main.c
> @@ -0,0 +1,200 @@
> +/*
> + * XILINX PS PCIe driver
> + *
> + * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
> + *
> + * Description
> + * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
> + * on ZynqMP UltraScale+ Devices.
> + * This PCIe driver creates a platform device with specific platform
> + * info enabling creation of DMA device corresponding to the channel
> + * information provided in the properties
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#include "ps_pcie.h"
> +#include "../dmaengine.h"
> +
> +#define DRV_MODULE_NAME "ps_pcie_dma"
> +
> +static int ps_pcie_dma_probe(struct pci_dev *pdev,
> + const struct pci_device_id *ent);
> +static void ps_pcie_dma_remove(struct pci_dev *pdev);
why do you need fwd declarations of these?
> +
> +static u32 channel_properties_pcie_axi[] = {
> + (u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
> + (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
> + (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
why the casts?
> +
> +static u32 channel_properties_axi_pcie[] = {
> + (u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
> + (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
> + (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
> +
> +static struct property_entry generic_pcie_ep_property[] = {
> + PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
> + channel_properties_pcie_axi),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
> + channel_properties_axi_pcie),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
> + channel_properties_pcie_axi),
> + PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
> + channel_properties_axi_pcie),
> + { },
> +};
> +
> +static const struct platform_device_info xlnx_std_platform_dev_info = {
> + .name = XLNX_PLATFORM_DRIVER_NAME,
> + .properties = generic_pcie_ep_property,
> +};
> +
> +/**
> + * ps_pcie_dma_probe - Driver probe function
> + * @pdev: Pointer to the pci_dev structure
> + * @ent: pci device id
> + *
> + * Return: '0' on success and failure value on error
> + */
I didn't get any useful info from this; please get rid of these where they
don't help anyone...
> +static int ps_pcie_dma_probe(struct pci_dev *pdev,
> + const struct pci_device_id *ent)
> +{
> + int err;
> + struct platform_device *platform_dev;
> + struct platform_device_info platform_dev_info;
It helps readability if these are in reverse Christmas tree order!
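i.e. longest declaration first, something like:

	struct platform_device_info platform_dev_info;
	struct platform_device *platform_dev;
	int err;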
> +
> + dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
Useless, please remove.
> +
> + err = pcim_enable_device(pdev);
> + if (err) {
> + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
> + return err;
> + }
> +
> + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
> + if (err) {
> + dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
> + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
> + if (err) {
> + dev_err(&pdev->dev, "DMA mask set error\n");
No disabling of the device on error?
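(Since pcim_enable_device() above is device-managed, the disable should happen
automatically if probe fails; if manual cleanup were relied on instead, the
error path would look something like:

		if (err) {
			dev_err(&pdev->dev, "DMA mask set error\n");
			pci_disable_device(pdev);
			return err;
		}
)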
> + return err;
> + }
> + }
> +
> + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
> + if (err) {
> + dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
> + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
> + if (err) {
> + dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
> + return err;
> + }
> + }
> +
> + pci_set_master(pdev);
> +
> + /* For Root DMA platform device will be created through device tree */
> + if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
> + pdev->device == ZYNQMP_RC_DMA_DEVID)
> + return 0;
The indentation is terrible!
Why register for this ID then? Returning 0 means success, so I'm not sure what
you are trying to do here.
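On the indentation, checkpatch wants the continuation aligned under the opening
parenthesis, i.e. something like:

	if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
	    pdev->device == ZYNQMP_RC_DMA_DEVID)
		return 0;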
> +
> + memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
> + sizeof(xlnx_std_platform_dev_info));
> +
> + /* Do device specific channel configuration changes to
> + * platform_dev_info.properties if required
> + * More information on channel properties can be found
> + * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
> + */
/*
* kernel code expects multiline
* comments like this
*/
> +
> + platform_dev_info.parent = &pdev->dev;
> + platform_dev_info.data = &pdev;
> + platform_dev_info.size_data = sizeof(struct pci_dev **);
??
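Presumably the intent is to copy the pdev pointer itself into the platform
data; if so, spelling the size out in terms of the variable would read more
clearly:

	platform_dev_info.data = &pdev;
	platform_dev_info.size_data = sizeof(pdev);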
> +
> + platform_dev = platform_device_register_full(&platform_dev_info);
> + if (IS_ERR(platform_dev)) {
> + dev_err(&pdev->dev,
> + "Cannot create platform device, aborting\n");
> + return PTR_ERR(platform_dev);
> + }
> +
> + pci_set_drvdata(pdev, platform_dev);
> +
> + dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
> +
> + return 0;
> +}
> +
> +static struct pci_device_id ps_pcie_dma_tbl[] = {
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
> + { }
> +};
> +
> +static struct pci_driver ps_pcie_dma_driver = {
> + .name = DRV_MODULE_NAME,
> + .id_table = ps_pcie_dma_tbl,
> + .probe = ps_pcie_dma_probe,
> + .remove = ps_pcie_dma_remove,
> +};
> +
> +/**
> + * ps_pcie_init - Driver init function
> + *
> + * Return: 0 on success. Non zero on failure
> + */
> +static int __init ps_pcie_init(void)
> +{
> + int ret;
> +
> + pr_info("%s init()\n", DRV_MODULE_NAME);
> +
> + ret = pci_register_driver(&ps_pcie_dma_driver);
> + if (ret)
> + return ret;
> +
> + ret = dma_platform_driver_register();
> + if (ret)
> + pci_unregister_driver(&ps_pcie_dma_driver);
> +
> + return ret;
> +}
> +
> +/**
> + * ps_pcie_dma_remove - Driver remove function
> + * @pdev: Pointer to the pci_dev structure
> + *
> + * Return: void
> + */
> +static void ps_pcie_dma_remove(struct pci_dev *pdev)
> +{
> + struct platform_device *platform_dev;
> +
> + platform_dev = (struct platform_device *)pci_get_drvdata(pdev);
No need to cast from void *.
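i.e. simply:

	platform_dev = pci_get_drvdata(pdev);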
> +
> + if (platform_dev)
> + platform_device_unregister(platform_dev);
> +}
> +
> +/**
> + * ps_pcie_exit - Driver exit function
> + *
> + * Return: void
> + */
> +static void __exit ps_pcie_exit(void)
> +{
> + pr_info("%s exit()\n", DRV_MODULE_NAME);
> +
> + dma_platform_driver_unregister();
> + pci_unregister_driver(&ps_pcie_dma_driver);
> +}
> +
> +module_init(ps_pcie_init);
> +module_exit(ps_pcie_exit);
> +
> +MODULE_AUTHOR("Xilinx Inc");
> +MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/linux/dma/ps_pcie_dma.h b/include/linux/dma/ps_pcie_dma.h
> new file mode 100644
> index 0000000..d11323a
> --- /dev/null
> +++ b/include/linux/dma/ps_pcie_dma.h
> @@ -0,0 +1,69 @@
> +/*
> + * Xilinx PS PCIe DMA Engine support header file
> + *
> + * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
> + *
> + * This program is free software: you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation
> + */
> +
> +#ifndef __DMA_XILINX_PS_PCIE_H
> +#define __DMA_XILINX_PS_PCIE_H
> +
> +#include <linux/dma-mapping.h>
> +#include <linux/dmaengine.h>
> +
> +#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
> +
> +#define ZYNQMP_DMA_DEVID (0xD024)
> +#define ZYNQMP_RC_DMA_DEVID (0xD021)
> +
> +#define MAX_ALLOWED_CHANNELS_IN_HW 4
> +
> +#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
> +
> +#define DEFAULT_DMA_QUEUES 4
> +#define TWO_DMA_QUEUES 2
> +
> +#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
> +#define MAX_DESCRIPTORS 65536
> +
> +#define CHANNEL_COAELSE_COUNT 0
> +
> +#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milli seconds */
> +
> +#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
> +#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
> +
> +/**
> + * struct BAR_PARAMS - PCIe Bar Parameters
> + * @BAR_PHYS_ADDR: PCIe BAR Physical address
> + * @BAR_LENGTH: Length of PCIe BAR
> + * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
> + */
> +struct BAR_PARAMS {
> + dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
> + unsigned long BAR_LENGTH; /**< Length of BAR memory window */
> + void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
Okay, you have the same comment twice. And what is with the DAMN UPPER CASE
member names? If you cannot do basic checks on your patches, I also refuse to
waste my time and review this any further!
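For reference, a kernel-style version of that struct (the member names here
are only illustrative) would be along the lines of:

struct bar_params {
	dma_addr_t phys_addr;	/* base physical address of BAR memory */
	unsigned long length;	/* length of BAR memory window */
	void *virt_addr;	/* virtual address of mapped BAR memory */
};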
--
~Vinod
On Fri, Sep 08, 2017 at 05:53:07PM +0530, Ravi Shankar Jonnalagadda wrote:
> Platform driver handles transactions for PCIe EP DMA and Root DMA
>
> Signed-off-by: Ravi Shankar Jonnalagadda <[email protected]>
> Signed-off-by: RaviKiran Gummaluri <[email protected]>
> ---
> drivers/dma/xilinx/ps_pcie_platform.c | 3055 +++++++++++++++++++++++++++++++++
I don't want to lose my sanity trying to review a 3k-line patch. If you don't
put in the effort to make your code easier to review, I don't want to see this...
Nacked-by: Vinod Koul <[email protected]>
--
~Vinod