2016-04-15 11:29:12

by Purna Chandra Mandal

Subject: [PATCH v3 1/2] spi: pic32-sqi: add binding document for PIC32 Quad-SPI driver.

Document Device tree bindings for the quad SPI peripheral
found on Microchip PIC32 class devices.

Signed-off-by: Purna Chandra Mandal <[email protected]>
Acked-by: Rob Herring <[email protected]>

Cc: Kumar Gala <[email protected]>
Cc: Ian Campbell <[email protected]>
Cc: Rob Herring <[email protected]>
Cc: Pawel Moll <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mark Brown <[email protected]>

---

Changes in v3:
- update example, replace 'sqi@' with 'spi@'.

Changes in v2: None
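
Note (not part of the binding document below): a flash device would normally
hang off this controller as a child node using the generic SPI bus properties
from spi-bus.txt. The compatible string, frequency and bus widths in this
sketch are illustrative assumptions only, not requirements of this binding:

    flash@0 {
        compatible = "jedec,spi-nor";
        reg = <0>;                      /* chip select 0 */
        spi-max-frequency = <50000000>;
        spi-tx-bus-width = <4>;
        spi-rx-bus-width = <4>;
    };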

Documentation/devicetree/bindings/spi/sqi-pic32.txt | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 Documentation/devicetree/bindings/spi/sqi-pic32.txt

diff --git a/Documentation/devicetree/bindings/spi/sqi-pic32.txt b/Documentation/devicetree/bindings/spi/sqi-pic32.txt
new file mode 100644
index 0000000..c82d021
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/sqi-pic32.txt
@@ -0,0 +1,18 @@
+Microchip PIC32 Quad SPI controller
+-----------------------------------
+Required properties:
+- compatible: Should be "microchip,pic32mzda-sqi".
+- reg: Address and length of SQI controller register space.
+- interrupts: Should contain SQI interrupt.
+- clocks: Should contain phandles of the two clocks, in sequence: one that
+  drives the clock on the SPI bus and one that drives the SQI controller.
+- clock-names: Should be "spi_ck" and "reg_ck" in order.
+
+Example:
+ sqi1: spi@1f8e2000 {
+ compatible = "microchip,pic32mzda-sqi";
+ reg = <0x1f8e2000 0x200>;
+ clocks = <&rootclk REF2CLK>, <&rootclk PB5CLK>;
+ clock-names = "spi_ck", "reg_ck";
+ interrupts = <169 IRQ_TYPE_LEVEL_HIGH>;
+ };
--
1.8.3.1


2016-04-15 11:29:17

by Purna Chandra Mandal

Subject: [PATCH v3 2/2] spi: pic32-sqi: add SPI driver for PIC32 SQI controller.

This driver implements an SPI master interface for the PIC32 Quad SPI
(SQI) controller, primarily for accessing quad SPI flash. It uses
descriptor-based DMA transfers and supports half-duplex communication
for single, dual and quad SPI transactions.

Signed-off-by: Purna Chandra Mandal <[email protected]>
Cc: Mark Brown <[email protected]>

---

Changes in v3:
- drop sqi_map/unmap_transfer() in favor of the core-provided helpers
- replace devm_request_irq() with request_irq() to fix release ordering
- check return value of clk_prepare_enable()
- fix checking devm_ioremap_resource() return value

Changes in v2:
- update subject line of dt/binding patch to reflect the subsystem's style.
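
Note (not part of this patch): a minimal, hypothetical client-side sketch of
how a quad read could be issued through the standard spi_message API that the
driver below implements, using one half-duplex transfer per direction. The
function name, the 0x6b opcode, the single dummy byte and the buffer handling
are assumptions for illustration only; real code must also ensure the caller's
data buffer is DMA-safe, since this controller always transfers by DMA.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/spi/spi.h>

    static int example_quad_read(struct spi_device *spi, u32 offset,
                                 void *buf, size_t len)
    {
            struct spi_transfer xfers[2];
            struct spi_message msg;
            u8 *cmd;
            int ret;

            /* command buffer must be DMA-safe, so allocate it from the heap */
            cmd = kmalloc(5, GFP_KERNEL);
            if (!cmd)
                    return -ENOMEM;

            /* command phase on a single line: opcode, 24-bit address, dummy */
            cmd[0] = 0x6b;
            cmd[1] = offset >> 16;
            cmd[2] = offset >> 8;
            cmd[3] = offset;
            cmd[4] = 0xff;

            memset(xfers, 0, sizeof(xfers));
            xfers[0].tx_buf = cmd;
            xfers[0].len = 5;
            xfers[0].tx_nbits = SPI_NBITS_SINGLE;

            /* data phase returned on all four lines */
            xfers[1].rx_buf = buf;
            xfers[1].len = len;
            xfers[1].rx_nbits = SPI_NBITS_QUAD;

            spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
            ret = spi_sync(spi, &msg);

            kfree(cmd);
            return ret;
    }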

drivers/spi/Kconfig | 6 +
drivers/spi/Makefile | 1 +
drivers/spi/spi-pic32-sqi.c | 768 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 775 insertions(+)
create mode 100644 drivers/spi/spi-pic32-sqi.c

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8a8ff50..281ed5d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -442,6 +442,12 @@ config SPI_PIC32
help
SPI driver for Microchip PIC32 SPI master controller.

+config SPI_PIC32_SQI
+ tristate "Microchip PIC32 Quad SPI driver"
+ depends on MACH_PIC32 || COMPILE_TEST
+ help
+ SPI driver for PIC32 Quad SPI controller.
+
config SPI_PL022
tristate "ARM AMBA PL022 SSP controller"
depends on ARM_AMBA
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 06019ed..3c74d00 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
+obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
new file mode 100644
index 0000000..b215347
--- /dev/null
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -0,0 +1,768 @@
+/*
+ * PIC32 Quad SPI controller driver.
+ *
+ * Purna Chandra Mandal <[email protected]>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+/* SQI registers */
+#define PESQI_XIP_CONF1_REG 0x00
+#define PESQI_XIP_CONF2_REG 0x04
+#define PESQI_CONF_REG 0x08
+#define PESQI_CTRL_REG 0x0C
+#define PESQI_CLK_CTRL_REG 0x10
+#define PESQI_CMD_THRES_REG 0x14
+#define PESQI_INT_THRES_REG 0x18
+#define PESQI_INT_ENABLE_REG 0x1C
+#define PESQI_INT_STAT_REG 0x20
+#define PESQI_TX_DATA_REG 0x24
+#define PESQI_RX_DATA_REG 0x28
+#define PESQI_STAT1_REG 0x2C
+#define PESQI_STAT2_REG 0x30
+#define PESQI_BD_CTRL_REG 0x34
+#define PESQI_BD_CUR_ADDR_REG 0x38
+#define PESQI_BD_BASE_ADDR_REG 0x40
+#define PESQI_BD_STAT_REG 0x44
+#define PESQI_BD_POLL_CTRL_REG 0x48
+#define PESQI_BD_TX_DMA_STAT_REG 0x4C
+#define PESQI_BD_RX_DMA_STAT_REG 0x50
+#define PESQI_THRES_REG 0x54
+#define PESQI_INT_SIGEN_REG 0x58
+
+/* PESQI_CONF_REG fields */
+#define PESQI_MODE 0x7
+#define PESQI_MODE_BOOT 0
+#define PESQI_MODE_PIO 1
+#define PESQI_MODE_DMA 2
+#define PESQI_MODE_XIP 3
+#define PESQI_MODE_SHIFT 0
+#define PESQI_CPHA BIT(3)
+#define PESQI_CPOL BIT(4)
+#define PESQI_LSBF BIT(5)
+#define PESQI_RXLATCH BIT(7)
+#define PESQI_SERMODE BIT(8)
+#define PESQI_WP_EN BIT(9)
+#define PESQI_HOLD_EN BIT(10)
+#define PESQI_BURST_EN BIT(12)
+#define PESQI_CS_CTRL_HW BIT(15)
+#define PESQI_SOFT_RESET BIT(16)
+#define PESQI_LANES_SHIFT 20
+#define PESQI_SINGLE_LANE 0
+#define PESQI_DUAL_LANE 1
+#define PESQI_QUAD_LANE 2
+#define PESQI_CSEN_SHIFT 24
+#define PESQI_EN BIT(23)
+
+/* PESQI_CLK_CTRL_REG fields */
+#define PESQI_CLK_EN BIT(0)
+#define PESQI_CLK_STABLE BIT(1)
+#define PESQI_CLKDIV_SHIFT 8
+#define PESQI_CLKDIV 0xff
+
+/* PESQI_INT_THR/CMD_THR_REG */
+#define PESQI_TXTHR_MASK 0x1f
+#define PESQI_TXTHR_SHIFT 8
+#define PESQI_RXTHR_MASK 0x1f
+#define PESQI_RXTHR_SHIFT 0
+
+/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
+#define PESQI_TXEMPTY BIT(0)
+#define PESQI_TXFULL BIT(1)
+#define PESQI_TXTHR BIT(2)
+#define PESQI_RXEMPTY BIT(3)
+#define PESQI_RXFULL BIT(4)
+#define PESQI_RXTHR BIT(5)
+#define PESQI_BDDONE BIT(9) /* BD processing complete */
+#define PESQI_PKTCOMP BIT(10) /* packet processing complete */
+#define PESQI_DMAERR BIT(11) /* error */
+
+/* PESQI_BD_CTRL_REG */
+#define PESQI_DMA_EN BIT(0) /* enable DMA engine */
+#define PESQI_POLL_EN BIT(1) /* enable polling */
+#define PESQI_BDP_START BIT(2) /* start BD processor */
+
+/* PESQI controller buffer descriptor */
+struct buf_desc {
+ u32 bd_ctrl; /* control */
+ u32 bd_status; /* reserved */
+ u32 bd_addr; /* DMA buffer addr */
+ u32 bd_nextp; /* next item in chain */
+};
+
+/* bd_ctrl */
+#define BD_BUFLEN 0x1ff
+#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
+#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
+#define BD_LIFM BIT(18) /* last data of pkt */
+#define BD_LAST BIT(19) /* end of list */
+#define BD_DATA_RECV BIT(20) /* receive data */
+#define BD_DDR BIT(21) /* DDR mode */
+#define BD_DUAL BIT(22) /* Dual SPI */
+#define BD_QUAD BIT(23) /* Quad SPI */
+#define BD_LSBF BIT(25) /* LSB First */
+#define BD_STAT_CHECK BIT(27) /* Status poll */
+#define BD_DEVSEL_SHIFT 28 /* CS */
+#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
+#define BD_EN BIT(31) /* BD owned by H/W */
+
+/**
+ * struct ring_desc - Representation of SQI ring descriptor
+ * @list: list element to add to free or used list.
+ * @bd: PESQI controller buffer descriptor
+ * @bd_dma: DMA address of PESQI controller buffer descriptor
+ * @xfer_len: transfer length
+ */
+struct ring_desc {
+ struct list_head list;
+ struct buf_desc *bd;
+ dma_addr_t bd_dma;
+ u32 xfer_len;
+};
+
+/* Global constants */
+#define PESQI_BD_BUF_LEN_MAX 256
+#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */
+
+struct pic32_sqi {
+ void __iomem *regs;
+ struct clk *sys_clk;
+ struct clk *base_clk; /* drives spi clock */
+ struct spi_master *master;
+ int irq;
+ struct completion xfer_done;
+ struct ring_desc *ring;
+ void *bd;
+ dma_addr_t bd_dma;
+ struct list_head bd_list_free; /* free */
+ struct list_head bd_list_used; /* allocated */
+ struct spi_device *cur_spi;
+ u32 cur_speed;
+ u8 cur_mode;
+};
+
+static inline void pic32_setbits(void __iomem *reg, u32 set)
+{
+ writel(readl(reg) | set, reg);
+}
+
+static inline void pic32_clrbits(void __iomem *reg, u32 clr)
+{
+ writel(readl(reg) & ~clr, reg);
+}
+
+static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
+{
+ u32 val, div;
+
+ /* div = base_clk / (2 * spi_clk) */
+ div = clk_get_rate(sqi->base_clk) / (2 * sck);
+ div &= PESQI_CLKDIV;
+
+ val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
+ /* apply new divider */
+ val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
+ val |= div << PESQI_CLKDIV_SHIFT;
+ writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
+
+ /* wait for stability */
+ return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
+ val & PESQI_CLK_STABLE, 1, 5000);
+}
+
+static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
+{
+ u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
+
+ writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
+ /* INT_SIGEN works as interrupt-gate to INTR line */
+ writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
+{
+ writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
+ writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
+{
+ struct pic32_sqi *sqi = dev_id;
+ u32 enable, status;
+
+ enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
+ status = readl(sqi->regs + PESQI_INT_STAT_REG);
+
+ /* check spurious interrupt */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PESQI_DMAERR) {
+ enable = 0;
+ goto irq_done;
+ }
+
+ if (status & PESQI_TXTHR)
+ enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);
+
+ if (status & PESQI_RXTHR)
+ enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);
+
+ if (status & PESQI_BDDONE)
+ enable &= ~PESQI_BDDONE;
+
+ /* packet processing completed */
+ if (status & PESQI_PKTCOMP) {
+ /* mask all interrupts */
+ enable = 0;
+ /* complete the transaction */
+ complete(&sqi->xfer_done);
+ }
+
+irq_done:
+ /* interrupts are sticky, so mask when handled */
+ writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
+
+ return IRQ_HANDLED;
+}
+
+static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+
+ if (list_empty(&sqi->bd_list_free))
+ return NULL;
+
+ rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
+ list_del(&rdesc->list);
+ list_add_tail(&rdesc->list, &sqi->bd_list_used);
+ return rdesc;
+}
+
+static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
+{
+ list_del(&rdesc->list);
+ list_add(&rdesc->list, &sqi->bd_list_free);
+}
+
+static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
+ struct spi_message *mesg,
+ struct spi_transfer *xfer)
+{
+ struct spi_device *spi = mesg->spi;
+ struct scatterlist *sg, *sgl;
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int nents, i;
+ u32 bd_ctrl;
+ u32 nbits;
+
+ /* Device selection */
+ bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
+
+ /* half-duplex: select transfer buffer, direction and lane */
+ if (xfer->rx_buf) {
+ bd_ctrl |= BD_DATA_RECV;
+ nbits = xfer->rx_nbits;
+ sgl = xfer->rx_sg.sgl;
+ nents = xfer->rx_sg.nents;
+ } else {
+ nbits = xfer->tx_nbits;
+ sgl = xfer->tx_sg.sgl;
+ nents = xfer->tx_sg.nents;
+ }
+
+ if (nbits & SPI_NBITS_QUAD)
+ bd_ctrl |= BD_QUAD;
+ else if (nbits & SPI_NBITS_DUAL)
+ bd_ctrl |= BD_DUAL;
+
+ /* LSB first */
+ if (spi->mode & SPI_LSB_FIRST)
+ bd_ctrl |= BD_LSBF;
+
+ /* ownership to hardware */
+ bd_ctrl |= BD_EN;
+
+ for_each_sg(sgl, sg, nents, i) {
+ /* get ring descriptor */
+ rdesc = ring_desc_get(sqi);
+ if (!rdesc)
+ break;
+
+ bd = rdesc->bd;
+
+ /* BD CTRL: length */
+ rdesc->xfer_len = sg_dma_len(sg);
+ bd->bd_ctrl = bd_ctrl;
+ bd->bd_ctrl |= rdesc->xfer_len;
+
+ /* BD STAT */
+ bd->bd_status = 0;
+
+ /* BD BUFFER ADDRESS */
+ bd->bd_addr = sg->dma_address;
+ }
+
+ return 0;
+}
+
+static int pic32_sqi_prepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* enable spi interface */
+ pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+ /* enable spi clk */
+ pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+
+ return 0;
+}
+
+static bool pic32_sqi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *x)
+{
+ /* Do DMA irrespective of transfer size */
+ return true;
+}
+
+static int pic32_sqi_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct ring_desc *rdesc, *next;
+ struct spi_transfer *xfer;
+ struct pic32_sqi *sqi;
+ int ret = 0, mode;
+ u32 val;
+
+ sqi = spi_master_get_devdata(master);
+
+ reinit_completion(&sqi->xfer_done);
+ msg->actual_length = 0;
+
+ /* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
+ * and "delay_usecs". spi_device specific speed and mode changes are
+ * handled, at best, when switching chip-select.
+ */
+ if (sqi->cur_spi != spi) {
+ /* set spi speed */
+ if (sqi->cur_speed != spi->max_speed_hz) {
+ sqi->cur_speed = spi->max_speed_hz;
+ ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
+ if (ret)
+ dev_warn(&spi->dev, "set_clk, %d\n", ret);
+ }
+
+ /* set spi mode */
+ mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
+ if (sqi->cur_mode != mode) {
+ val = readl(sqi->regs + PESQI_CONF_REG);
+ val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
+ if (mode & SPI_CPOL)
+ val |= PESQI_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ val |= PESQI_LSBF;
+ val |= PESQI_CPHA;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ sqi->cur_mode = mode;
+ }
+ sqi->cur_spi = spi;
+ }
+
+ /* prepare hardware desc-list(BD) for transfer(s) */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = pic32_sqi_one_transfer(sqi, msg, xfer);
+ if (ret) {
+ dev_err(&spi->dev, "xfer %p err\n", xfer);
+ goto xfer_out;
+ }
+ }
+
+ /* BDs are prepared and chained. Now mark BD_LAST and BD_CS_DEASSERT
+ * on the last element of the list.
+ */
+ rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
+ rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
+ BD_LIFM | BD_PKT_INT_EN;
+
+ /* set base address BD list for DMA engine */
+ rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
+ writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
+
+ /* enable interrupt */
+ pic32_sqi_enable_int(sqi);
+
+ /* enable DMA engine */
+ val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
+ writel(val, sqi->regs + PESQI_BD_CTRL_REG);
+
+ /* wait for xfer completion */
+ ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
+ if (ret <= 0) {
+ dev_err(&sqi->master->dev, "wait timed out/interrupted\n");
+ ret = -EIO;
+ msg->status = ret;
+ } else {
+ /* success */
+ msg->status = 0;
+ ret = 0;
+ }
+
+ /* disable DMA */
+ writel(0, sqi->regs + PESQI_BD_CTRL_REG);
+
+ pic32_sqi_disable_int(sqi);
+
+xfer_out:
+ list_for_each_entry_safe_reverse(rdesc, next,
+ &sqi->bd_list_used, list) {
+ /* Update total bytes transferred */
+ msg->actual_length += rdesc->xfer_len;
+ /* release ring descr */
+ ring_desc_put(sqi, rdesc);
+ }
+ spi_finalize_current_message(spi->master);
+
+ return ret;
+}
+
+static int pic32_sqi_unprepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* disable clk */
+ pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+ /* disable spi */
+ pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+
+ return 0;
+}
+
+/* This may be called twice for each spi dev */
+static int pic32_sqi_setup(struct spi_device *spi)
+{
+ struct pic32_sqi *sqi;
+
+ if (spi_get_ctldata(spi)) {
+ dev_err(&spi->dev, "is already associated\n");
+ return -EBUSY;
+ }
+
+ /* check word size */
+ if (!spi->bits_per_word) {
+ dev_err(&spi->dev, "No bits_per_word defined\n");
+ return -EINVAL;
+ }
+
+ /* check maximum SPI clk rate */
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "No max speed HZ parameter\n");
+ return -EINVAL;
+ }
+
+ if (spi->master->max_speed_hz < spi->max_speed_hz) {
+ dev_err(&spi->dev, "max speed %u HZ is too high\n",
+ spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+ sqi = spi_master_get_devdata(spi->master);
+ spi_set_ctldata(spi, (void *)sqi);
+
+ return 0;
+}
+
+static void pic32_sqi_cleanup(struct spi_device *spi)
+{
+ spi_set_ctldata(spi, (void *)NULL);
+}
+
+static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int i;
+
+ /* allocate coherent DMAable memory for hardware buffer descriptors. */
+ sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_DMA32);
+ if (!sqi->bd) {
+ dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
+ return -ENOMEM;
+ }
+
+ /* allocate software ring descriptors */
+ sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
+ if (!sqi->ring) {
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ return -ENOMEM;
+ }
+
+ bd = (struct buf_desc *)sqi->bd;
+
+ INIT_LIST_HEAD(&sqi->bd_list_free);
+ INIT_LIST_HEAD(&sqi->bd_list_used);
+
+ /* initialize ring-desc */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
+ INIT_LIST_HEAD(&rdesc->list);
+ rdesc->bd = &bd[i];
+ rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
+ list_add_tail(&rdesc->list, &sqi->bd_list_free);
+ }
+
+ /* Prepare BD: chain to next BD(s) */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
+ bd[i].bd_nextp = rdesc[i + 1].bd_dma;
+ bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
+
+ return 0;
+}
+
+static void ring_desc_ring_free(struct pic32_sqi *sqi)
+{
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(struct buf_desc) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ kfree(sqi->ring);
+}
+
+static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
+{
+ unsigned long flags;
+ u32 val;
+
+ /* A soft-reset of the PESQI controller triggers interrupts.
+ * We are not yet ready to handle them, so disable CPU
+ * interrupts for the time being.
+ */
+ local_irq_save(flags);
+
+ /* assert soft-reset */
+ writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
+
+ /* wait until clear */
+ readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
+ !(val & PESQI_SOFT_RESET), 1, 5000);
+
+ /* disable all interrupts */
+ pic32_sqi_disable_int(sqi);
+
+ /* Now it is safe to re-enable CPU interrupts */
+ local_irq_restore(flags);
+
+ /* tx and rx fifo interrupt threshold */
+ val = readl(sqi->regs + PESQI_CMD_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_CMD_THRES_REG);
+
+ val = readl(sqi->regs + PESQI_INT_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_INT_THRES_REG);
+
+ /* default configuration */
+ val = readl(sqi->regs + PESQI_CONF_REG);
+
+ /* set mode: DMA */
+ val &= ~PESQI_MODE;
+ val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* DATAEN - SQIID0-ID3 */
+ val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
+
+ /* burst/INCR4 enable */
+ val |= PESQI_BURST_EN;
+
+ /* CSEN - all CS */
+ val |= 3U << PESQI_CSEN_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* write poll count */
+ writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
+
+ sqi->cur_speed = 0;
+ sqi->cur_mode = -1;
+}
+
+static int pic32_sqi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct pic32_sqi *sqi;
+ struct resource *reg;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
+ if (!master)
+ return -ENOMEM;
+
+ sqi = spi_master_get_devdata(master);
+ sqi->master = master;
+
+ reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(sqi->regs)) {
+ ret = PTR_ERR(sqi->regs);
+ goto err_free_master;
+ }
+
+ /* irq */
+ sqi->irq = platform_get_irq(pdev, 0);
+ if (sqi->irq < 0) {
+ dev_err(&pdev->dev, "no irq found\n");
+ ret = sqi->irq;
+ goto err_free_master;
+ }
+
+ /* clocks */
+ sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
+ if (IS_ERR(sqi->sys_clk)) {
+ ret = PTR_ERR(sqi->sys_clk);
+ dev_err(&pdev->dev, "no sys_clk ?\n");
+ goto err_free_master;
+ }
+
+ sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
+ if (IS_ERR(sqi->base_clk)) {
+ ret = PTR_ERR(sqi->base_clk);
+ dev_err(&pdev->dev, "no base clk ?\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->sys_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "sys clk enable failed\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->base_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "base clk enable failed\n");
+ clk_disable_unprepare(sqi->sys_clk);
+ goto err_free_master;
+ }
+
+ init_completion(&sqi->xfer_done);
+
+ /* initialize hardware */
+ pic32_sqi_hw_init(sqi);
+
+ /* allocate buffers & descriptors */
+ ret = ring_desc_ring_alloc(sqi);
+ if (ret) {
+ dev_err(&pdev->dev, "ring alloc failed\n");
+ goto err_disable_clk;
+ }
+
+ /* install irq handlers */
+ ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
+ dev_name(&pdev->dev), sqi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq(%d) failed\n", sqi->irq);
+ goto err_free_ring;
+ }
+
+ /* register master */
+ master->num_chipselect = 2;
+ master->max_speed_hz = clk_get_rate(sqi->base_clk);
+ master->dma_alignment = 32;
+ master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
+ master->dev.of_node = of_node_get(pdev->dev.of_node);
+ master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
+ SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = pic32_sqi_setup;
+ master->cleanup = pic32_sqi_cleanup;
+ master->can_dma = pic32_sqi_can_dma;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->transfer_one_message = pic32_sqi_one_message;
+ master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
+ master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&master->dev, "failed registering spi master\n");
+ free_irq(sqi->irq, sqi);
+ goto err_free_ring;
+ }
+
+ platform_set_drvdata(pdev, sqi);
+
+ return 0;
+
+err_free_ring:
+ ring_desc_ring_free(sqi);
+
+err_disable_clk:
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int pic32_sqi_remove(struct platform_device *pdev)
+{
+ struct pic32_sqi *sqi = platform_get_drvdata(pdev);
+
+ /* release resources */
+ free_irq(sqi->irq, sqi);
+ ring_desc_ring_free(sqi);
+
+ /* disable clk */
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_sqi_of_ids[] = {
+ {.compatible = "microchip,pic32mzda-sqi",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
+
+static struct platform_driver pic32_sqi_driver = {
+ .driver = {
+ .name = "sqi-pic32",
+ .of_match_table = of_match_ptr(pic32_sqi_of_ids),
+ },
+ .probe = pic32_sqi_probe,
+ .remove = pic32_sqi_remove,
+};
+
+module_platform_driver(pic32_sqi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <[email protected]>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
+MODULE_LICENSE("GPL v2");
--
1.8.3.1

2016-04-18 19:04:34

by Mark Brown

Subject: Applied "spi: pic32-sqi: add binding document for PIC32 Quad-SPI driver." to the spi tree

The patch

spi: pic32-sqi: add binding document for PIC32 Quad-SPI driver.

has been applied to the spi tree at

git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git

All being well this means that it will be integrated into the linux-next
tree (usually sometime in the next 24 hours) and sent to Linus during
the next merge window (or sooner if it is a bug fix); however, if
problems are discovered then the patch may be dropped or reverted.

You may get further e-mails resulting from automated or manual testing
and review of the tree, please engage with people reporting problems and
send followup patches addressing any issues that are reported if needed.

If any updates are required or you are submitting further changes they
should be sent as incremental updates against current git, existing
patches will not be replaced.

Please add any relevant lists and maintainers to the CCs when replying
to this mail.

Thanks,
Mark

From 0a4afaae989c47fd93b73cc83d2c4a46b55aa1b7 Mon Sep 17 00:00:00 2001
From: Purna Chandra Mandal <[email protected]>
Date: Fri, 15 Apr 2016 16:57:18 +0530
Subject: [PATCH] spi: pic32-sqi: add binding document for PIC32 Quad-SPI
driver.

Document Device tree bindings for the quad SPI peripheral
found on Microchip PIC32 class devices.

Signed-off-by: Purna Chandra Mandal <[email protected]>
Acked-by: Rob Herring <[email protected]>

Cc: Kumar Gala <[email protected]>
Cc: Ian Campbell <[email protected]>
Cc: Rob Herring <[email protected]>
Cc: Pawel Moll <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mark Brown <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
---
Documentation/devicetree/bindings/spi/sqi-pic32.txt | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 Documentation/devicetree/bindings/spi/sqi-pic32.txt

--
2.8.0.rc3

2016-04-18 19:04:22

by Mark Brown

Subject: Applied "spi: pic32-sqi: add SPI driver for PIC32 SQI controller." to the spi tree

The patch

spi: pic32-sqi: add SPI driver for PIC32 SQI controller.

has been applied to the spi tree at

git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git

All being well this means that it will be integrated into the linux-next
tree (usually sometime in the next 24 hours) and sent to Linus during
the next merge window (or sooner if it is a bug fix); however, if
problems are discovered then the patch may be dropped or reverted.

You may get further e-mails resulting from automated or manual testing
and review of the tree, please engage with people reporting problems and
send followup patches addressing any issues that are reported if needed.

If any updates are required or you are submitting further changes they
should be sent as incremental updates against current git, existing
patches will not be replaced.

Please add any relevant lists and maintainers to the CCs when replying
to this mail.

Thanks,
Mark

From 3270ac230f660843a7f7d631746ee2c8ee63f347 Mon Sep 17 00:00:00 2001
From: Purna Chandra Mandal <[email protected]>
Date: Fri, 15 Apr 2016 16:57:19 +0530
Subject: [PATCH] spi: pic32-sqi: add SPI driver for PIC32 SQI controller.

This driver implements an SPI master interface for the PIC32 Quad SPI
(SQI) controller, primarily for accessing quad SPI flash. It uses
descriptor-based DMA transfers and supports half-duplex communication
for single, dual and quad SPI transactions.

Signed-off-by: Purna Chandra Mandal <[email protected]>
Cc: Mark Brown <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
---
drivers/spi/Kconfig | 6 +
drivers/spi/Makefile | 1 +
drivers/spi/spi-pic32-sqi.c | 768 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 775 insertions(+)
create mode 100644 drivers/spi/spi-pic32-sqi.c

--
2.8.0.rc3