2013-07-10 08:51:28

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs based on virt_dma.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Applies to next-20130703

The MMC driver I plan to submit next can use this
(falls back to PIO if unavailable).

Nothing else uses DMA on UC-7112-LX.

drivers/dma/Kconfig | 9 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 473 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
4 files changed, 671 insertions(+)
create mode 100644 drivers/dma/moxart-dma.c
create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ default n
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3f1e771
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,473 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+ struct virt_dma_chan vchan;
+ int ch_num;
+ bool allocated;
+ int error_flag;
+ struct moxart_dma_reg *reg;
+ void (*callback)(void *param);
+ void *callback_param;
+ struct completion dma_complete;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, vchan.chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+ unsigned int data_width, data_inc;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ data_width = APB_DMAB_DATA_WIDTH_1;
+ data_inc = APB_DMAB_DEST_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ data_width = APB_DMAB_DATA_WIDTH_2;
+ data_inc = APB_DMAB_DEST_INC_2_8;
+ break;
+ default:
+ data_width = APB_DMAB_DATA_WIDTH_4;
+ data_inc = APB_DMAB_DEST_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+ mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.source_inc = data_inc;
+
+ mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+ mcfg.bits.source_req_no = 0;
+ } else {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.dest_inc = data_inc;
+ mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+ mcfg.bits.dest_req_no = 0;
+ mcfg.bits.source_req_no = mchan->cfg.slave_id;
+ }
+
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned long flags;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->source_addr);
+ writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+ */
+ writel(size, &mchan->reg->cycles);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+ __func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &moxart_driver.driver) {
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned int ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+ } else {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+ if (i == mchan->ch_num
+ && !mchan->allocated) {
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ mchan->allocated = false;
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= APB_DMA_ENABLE;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct device *dev = devid;
+ struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+ unsigned int i;
+ union moxart_dma_reg_cfg mcfg;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+ mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+ mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ dev_dbg(dev, "%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ mchan->error_flag = 0;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res_dma;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, 0, &res_dma);
+ if (ret) {
+ dev_err(dev, "can't get DMA base resource\n");
+ return ret;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+
+ dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+ + i * sizeof(struct moxart_dma_reg));
+ mchan->callback = NULL;
+ mchan->allocated = 0;
+ mchan->callback_param = NULL;
+ vchan_init(&mchan->vchan, &mdc->dma_slave);
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, i, mchan->ch_num, mchan->reg);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ platform_set_drvdata(pdev, mdc);
+
+ moxart_dma_irq.dev_id = dev;
+ setup_irq(irq, &moxart_dma_irq);
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+ dma_async_device_unregister(&m->dma_slave);
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL 4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE (1<<0)
+#define APB_DMA_FIN_INT_STS (1<<1)
+#define APB_DMA_FIN_INT_EN (1<<2)
+#define APB_DMA_BURST_MODE (1<<3)
+#define APB_DMA_ERR_INT_STS (1<<4)
+#define APB_DMA_ERR_INT_EN (1<<5)
+#define APB_DMA_SOURCE_AHB (1<<6)
+#define APB_DMA_SOURCE_APB 0
+#define APB_DMA_DEST_AHB (1<<7)
+#define APB_DMA_DEST_APB 0
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 (1<<8)
+#define APB_DMA_SOURCE_INC_2_8 (2<<8)
+#define APB_DMA_SOURCE_INC_4_16 (3<<8)
+#define APB_DMA_SOURCE_DEC_1_4 (5<<8)
+#define APB_DMA_SOURCE_DEC_2_8 (6<<8)
+#define APB_DMA_SOURCE_DEC_4_16 (7<<8)
+#define APB_DMA_SOURCE_INC_MASK (7<<8)
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 (1<<12)
+#define APB_DMA_DEST_INC_2_8 (2<<12)
+#define APB_DMA_DEST_INC_4_16 (3<<12)
+#define APB_DMA_DEST_DEC_1_4 (5<<12)
+#define APB_DMA_DEST_DEC_2_8 (6<<12)
+#define APB_DMA_DEST_DEC_4_16 (7<<12)
+#define APB_DMA_DEST_INC_MASK (7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK (15<<16)
+#define APB_DMA_DATA_WIDTH_MASK (3<<20)
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 (1<<20)
+#define APB_DMA_DATA_WIDTH_1 (2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK (15<<24)
+ unsigned int ul;
+
+ struct {
+
+#define APB_DMAB_ENABLE 1
+ /* enable DMA */
+ unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS 1
+ /* finished interrupt status */
+ unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN 1
+ /* finished interrupt enable */
+ unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE 1
+ /* burst mode */
+ unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS 1
+ /* error interrupt status */
+ unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN 1
+ /* error interrupt enable */
+ unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB 1
+#define APB_DMAB_SOURCE_APB 0
+ /* 0:APB (device), 1:AHB (RAM) */
+ unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB 1
+#define APB_DMAB_DEST_APB 0
+ /* 0:APB, 1:AHB */
+ unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0 0
+#define APB_DMAB_SOURCE_INC_1_4 1
+#define APB_DMAB_SOURCE_INC_2_8 2
+#define APB_DMAB_SOURCE_INC_4_16 3
+#define APB_DMAB_SOURCE_DEC_1_4 5
+#define APB_DMAB_SOURCE_DEC_2_8 6
+#define APB_DMAB_SOURCE_DEC_4_16 7
+#define APB_DMAB_SOURCE_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int source_inc:3;
+
+ unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0 0
+#define APB_DMAB_DEST_INC_1_4 1
+#define APB_DMAB_DEST_INC_2_8 2
+#define APB_DMAB_DEST_INC_4_16 3
+#define APB_DMAB_DEST_DEC_1_4 5
+#define APB_DMAB_DEST_DEC_2_8 6
+#define APB_DMAB_DEST_DEC_4_16 7
+#define APB_DMAB_DEST_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int dest_inc:3;
+
+ unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK 15
+ /*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK 3
+#define APB_DMAB_DATA_WIDTH_4 0
+#define APB_DMAB_DATA_WIDTH_2 1
+#define APB_DMAB_DATA_WIDTH_1 2
+ /*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+ unsigned int data_width:2;
+
+ unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK 15
+ /*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int source_req_no:4;
+
+ unsigned int reserved4:4;
+ } bits;
+};
+
+struct moxart_dma_reg {
+ unsigned int source_addr;
+ unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ unsigned int cycles; /* depend on burst mode */
+ union moxart_dma_reg_cfg cfg;
+};
+
+#endif
--
1.8.2.1


2013-07-10 09:31:44

by Russell King - ARM Linux

[permalink] [raw]
Subject: Re: [PATCH] dmaengine: Add MOXA ART DMA engine driver

On Wed, Jul 10, 2013 at 10:51:03AM +0200, Jonas Jensen wrote:
> +#include "virt-dma.h"
...
> +struct moxart_dma_chan {
> + struct virt_dma_chan vchan;
...
> +static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> + return container_of(c, struct moxart_dma_chan, vchan.chan);
> +}
...
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> + mchan->ch_num = i;
> + mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> + + i * sizeof(struct moxart_dma_reg));
> + mchan->callback = NULL;
> + mchan->allocated = 0;
> + mchan->callback_param = NULL;
> + vchan_init(&mchan->vchan, &mdc->dma_slave);

Do you actually make any use what so ever of the vchan support?

2013-07-10 09:48:21

by Jonas Jensen

[permalink] [raw]
Subject: Re: [PATCH] dmaengine: Add MOXA ART DMA engine driver

On 10 July 2013 11:30, Russell King - ARM Linux <[email protected]> wrote:
> Do you actually make any use what so ever of the vchan support?

Only because it was inspired by the edma driver:

static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
{
return container_of(c, struct moxart_dma_chan, vchan.chan);
}

It could use struct dma_chan instead I think.


Best regards,
Jonas

2013-07-10 12:44:22

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v2] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Applies to next-20130703

Changes since v1:

1. remove use of vchan support

drivers/dma/Kconfig | 9 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 477 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
4 files changed, 675 insertions(+)
create mode 100644 drivers/dma/moxart-dma.c
create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ default n
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ int error_flag;
+ struct moxart_dma_reg *reg;
+ void (*callback)(void *param);
+ void *callback_param;
+ struct completion dma_complete;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+ unsigned int data_width, data_inc;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ data_width = APB_DMAB_DATA_WIDTH_1;
+ data_inc = APB_DMAB_DEST_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ data_width = APB_DMAB_DATA_WIDTH_2;
+ data_inc = APB_DMAB_DEST_INC_2_8;
+ break;
+ default:
+ data_width = APB_DMAB_DATA_WIDTH_4;
+ data_inc = APB_DMAB_DEST_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+ mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.source_inc = data_inc;
+
+ mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+ mcfg.bits.source_req_no = 0;
+ } else {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.dest_inc = data_inc;
+ mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+ mcfg.bits.dest_req_no = 0;
+ mcfg.bits.source_req_no = mchan->cfg.slave_id;
+ }
+
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned long flags;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->source_addr);
+ writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+ */
+ writel(size, &mchan->reg->cycles);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+ __func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &moxart_driver.driver) {
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned int ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+ } else {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+ if (i == mchan->ch_num
+ && !mchan->allocated) {
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ mchan->allocated = false;
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= APB_DMA_ENABLE;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct device *dev = devid;
+ struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+ unsigned int i;
+ union moxart_dma_reg_cfg mcfg;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+ mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+ mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ dev_dbg(dev, "%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ mchan->error_flag = 0;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res_dma;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, 0, &res_dma);
+ if (ret) {
+ dev_err(dev, "can't get DMA base resource\n");
+ return ret;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+
+ dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+ + i * sizeof(struct moxart_dma_reg));
+ mchan->callback = NULL;
+ mchan->allocated = 0;
+ mchan->callback_param = NULL;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, i, mchan->ch_num, mchan->reg);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ platform_set_drvdata(pdev, mdc);
+
+ moxart_dma_irq.dev_id = dev;
+ setup_irq(irq, &moxart_dma_irq);
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+ dma_async_device_unregister(&m->dma_slave);
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL 4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE (1<<0)
+#define APB_DMA_FIN_INT_STS (1<<1)
+#define APB_DMA_FIN_INT_EN (1<<2)
+#define APB_DMA_BURST_MODE (1<<3)
+#define APB_DMA_ERR_INT_STS (1<<4)
+#define APB_DMA_ERR_INT_EN (1<<5)
+#define APB_DMA_SOURCE_AHB (1<<6)
+#define APB_DMA_SOURCE_APB 0
+#define APB_DMA_DEST_AHB (1<<7)
+#define APB_DMA_DEST_APB 0
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 (1<<8)
+#define APB_DMA_SOURCE_INC_2_8 (2<<8)
+#define APB_DMA_SOURCE_INC_4_16 (3<<8)
+#define APB_DMA_SOURCE_DEC_1_4 (5<<8)
+#define APB_DMA_SOURCE_DEC_2_8 (6<<8)
+#define APB_DMA_SOURCE_DEC_4_16 (7<<8)
+#define APB_DMA_SOURCE_INC_MASK (7<<8)
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 (1<<12)
+#define APB_DMA_DEST_INC_2_8 (2<<12)
+#define APB_DMA_DEST_INC_4_16 (3<<12)
+#define APB_DMA_DEST_DEC_1_4 (5<<12)
+#define APB_DMA_DEST_DEC_2_8 (6<<12)
+#define APB_DMA_DEST_DEC_4_16 (7<<12)
+#define APB_DMA_DEST_INC_MASK (7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK (15<<16)
+#define APB_DMA_DATA_WIDTH_MASK (3<<20)
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 (1<<20)
+#define APB_DMA_DATA_WIDTH_1 (2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK (15<<24)
+ unsigned int ul;
+
+ struct {
+
+#define APB_DMAB_ENABLE 1
+ /* enable DMA */
+ unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS 1
+ /* finished interrupt status */
+ unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN 1
+ /* finished interrupt enable */
+ unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE 1
+ /* burst mode */
+ unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS 1
+ /* error interrupt status */
+ unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN 1
+ /* error interrupt enable */
+ unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB 1
+#define APB_DMAB_SOURCE_APB 0
+ /* 0:APB (device), 1:AHB (RAM) */
+ unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB 1
+#define APB_DMAB_DEST_APB 0
+ /* 0:APB, 1:AHB */
+ unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0 0
+#define APB_DMAB_SOURCE_INC_1_4 1
+#define APB_DMAB_SOURCE_INC_2_8 2
+#define APB_DMAB_SOURCE_INC_4_16 3
+#define APB_DMAB_SOURCE_DEC_1_4 5
+#define APB_DMAB_SOURCE_DEC_2_8 6
+#define APB_DMAB_SOURCE_DEC_4_16 7
+#define APB_DMAB_SOURCE_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int source_inc:3;
+
+ unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0 0
+#define APB_DMAB_DEST_INC_1_4 1
+#define APB_DMAB_DEST_INC_2_8 2
+#define APB_DMAB_DEST_INC_4_16 3
+#define APB_DMAB_DEST_DEC_1_4 5
+#define APB_DMAB_DEST_DEC_2_8 6
+#define APB_DMAB_DEST_DEC_4_16 7
+#define APB_DMAB_DEST_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int dest_inc:3;
+
+ unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK 15
+ /*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK 3
+#define APB_DMAB_DATA_WIDTH_4 0
+#define APB_DMAB_DATA_WIDTH_2 1
+#define APB_DMAB_DATA_WIDTH_1 2
+ /*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+ unsigned int data_width:2;
+
+ unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK 15
+ /*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int source_req_no:4;
+
+ unsigned int reserved4:4;
+ } bits;
+};
+
+/* Per-channel register window; channels are laid out back to back. */
+struct moxart_dma_reg {
+ unsigned int source_addr;
+ unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ unsigned int cycles; /* transfer cycle count; meaning depends on burst mode */
+ union moxart_dma_reg_cfg cfg;
+};
+
+#endif
--
1.8.2.1

2013-07-17 10:07:24

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v3] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v2:

1. add devicetree bindings document
2. remove DMA_VIRTUAL_CHANNELS and "default n" from Kconfig

Applies to next-20130716

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 477 +++++++++++++++++++++
drivers/dma/moxart-dma.h | 188 ++++++++
5 files changed, 692 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c
create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..61a019d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+/* Per-channel state for one of the controller's 4 APB DMA channels. */
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num; /* hardware channel index (0..APB_DMA_MAX_CHANNEL-1) */
+ bool allocated; /* claimed via alloc_chan_resources */
+ int error_flag; /* set by the IRQ handler on APB_DMA_ERR_INT_STS */
+ struct moxart_dma_reg *reg; /* this channel's register window */
+ void (*callback)(void *param); /* latched from tx_desc at submit time */
+ void *callback_param;
+ struct completion dma_complete; /* NOTE(review): never initialized or used in this file */
+ struct dma_slave_config cfg; /* cached copy of the last slave config */
+ struct dma_async_tx_descriptor tx_desc; /* single, reused descriptor (no queueing) */
+};
+
+/* Controller-wide state: the dmaengine device plus its channels. */
+struct moxart_dma_container {
+	int ctlr;			/* platform device id */
+	struct dma_device dma_slave;	/* dmaengine device we register */
+	struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/*
+ * Singleton controller state, allocated in probe.  Kept at file scope
+ * so the interrupt handler can reach the channels.  Made static: it is
+ * referenced nowhere outside this file, and an unqualified global of
+ * this name would pollute the kernel-wide namespace (sparse/checkpatch
+ * would flag it).
+ */
+static struct moxart_dma_container *mdc;
+
+/* Convenience: the struct device backing a dmaengine channel (for dev_dbg). */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+/* Up-cast from the embedded dma_device to the driver container. */
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Up-cast from the embedded dma_chan to the driver channel. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * Stop a channel: clear the enable bit and both interrupt-enable bits
+ * in its config register, under the global register lock.  Pending
+ * status bits are left as they are.  Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ /* read-modify-write of the channel config register */
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Cache a dmaengine slave config and program the static parts of the
+ * channel config register: burst mode, data width, address increments,
+ * bus selects (APB = device side, AHB = memory side) and the hardware
+ * handshake request line taken from cfg->slave_id.  Returns 0.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	/*
+	 * NOTE(review): width/increment are always derived from
+	 * src_addr_width, even for DMA_DEV_TO_MEM where dst_addr_width
+	 * would seem to apply -- confirm against the slave drivers.
+	 */
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	mcfg.bits.data_width = data_width;
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		/* memory (AHB) -> device (APB): source walks, dest fixed */
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		/*
+		 * device (APB) -> memory (AHB): dest walks, source fixed.
+		 * This branch previously used the SOURCE and DEST macro
+		 * names crossed over; the numeric values are identical
+		 * (AHB selects are 1, APB selects and INC_0 are 0), so
+		 * only the names are corrected -- behavior is unchanged.
+		 */
+		mcfg.bits.dest_sel = APB_DMAB_DEST_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_APB;
+		mcfg.bits.source_inc = APB_DMAB_SOURCE_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control hook: dispatch the two supported commands,
+ * DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG; anything else is -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	if (cmd == DMA_TERMINATE_ALL) {
+		moxart_terminate_all(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+
+	return -ENOSYS;
+}
+
+/*
+ * tx_submit hook: latch the completion callback, assign a cookie and
+ * enable the finish/error interrupts for the channel.  The transfer
+ * itself is started later by issue_pending (APB_DMA_ENABLE).
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ /* NOTE(review): callback fields are written outside dma_lock; the
+  * IRQ handler reads them -- confirm this race is acceptable. */
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* unmask the channel's finish and error interrupts */
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return cookie;
+}
+
+/*
+ * Prepare a slave scatter-gather transfer.  Only the first scatterlist
+ * entry is used: the hardware takes one source address, one destination
+ * address and a cycle count.  Returns the channel's single, reused
+ * descriptor.
+ *
+ * Fix: the old implementation declared a local
+ * 'union moxart_dma_reg_cfg mcfg' that was never assigned and then
+ * printed it with dev_dbg -- a read of an uninitialized variable
+ * (undefined behavior).  The dead variable and the bogus print are
+ * removed; no register access changes.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	/*
+	 * NOTE(review): sg_dma_address() already yields a DMA/bus address;
+	 * passing it through virt_to_phys() looks wrong and only works if
+	 * the two happen to coincide on this platform -- confirm.
+	 */
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 when 64 bytes are copied, i.e. one cycle copies 16 bytes
+	 * (when data_width == APB_DMAB_DATA_WIDTH_4)
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+/*
+ * dma_request_channel() filter: accept only channels owned by this
+ * driver whose hardware channel number matches *(unsigned int *)param.
+ */
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan;
+	unsigned int requested;
+
+	if (chan->device->dev->driver != &moxart_driver.driver) {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+
+	mchan = to_moxart_dma_chan(chan);
+	requested = *(unsigned int *)param;
+	dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+		__func__, mchan, requested, mchan->ch_num);
+	return requested == mchan->ch_num;
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+/*
+ * Claim the hardware channel backing @chan.  Returns 0 on success,
+ * -ENODEV when the channel is already allocated (or its index is out
+ * of range).
+ *
+ * The original scanned i = 0..APB_DMA_MAX_CHANNEL-1 looking for
+ * i == mchan->ch_num with !mchan->allocated; that loop is exactly a
+ * bounds check plus the 'allocated' test, written directly here.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (mchan->ch_num < 0 || mchan->ch_num >= APB_DMA_MAX_CHANNEL ||
+	    mchan->allocated)
+		return -ENODEV;
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/* Release a channel claimed by moxart_alloc_chan_resources(). */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = false;
+}
+
+/*
+ * Kick off the transfer programmed by prep_slave_sg/tx_submit by
+ * setting the channel's ENABLE bit.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= APB_DMA_ENABLE;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * device_tx_status hook.  Only the dmaengine cookie bookkeeping is
+ * consulted; this driver reports no residue.  The original body --
+ * "if (ret == DMA_SUCCESS || !txstate) return ret;" followed by
+ * "return ret;" -- returned the same value on every path, so it
+ * collapses to a single call.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * Fill in the dmaengine ops for this controller.  Only slave
+ * scatter-gather transfers are provided (no memcpy/cyclic).
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared interrupt handler: walk all allocated channels, complete the
+ * cookie on a finish interrupt, record errors, invoke the latched
+ * callback, then write the config register back with the handled
+ * status bits cleared -- presumably how this controller acks its
+ * interrupts; confirm against the datasheet.
+ *
+ * Always returns IRQ_HANDLED, even when no channel had a pending
+ * status bit.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct device *dev = devid;
+ struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+ unsigned int i;
+ union moxart_dma_reg_cfg mcfg;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+ mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+ mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ dev_dbg(dev, "%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ /* NOTE(review): error_flag is cleared again right after being
+  * set above, and nothing in this file ever reads it -- either
+  * expose it to consumers or drop it. */
+ mchan->error_flag = 0;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * irqaction installed with setup_irq() from probe.
+ * NOTE(review): IRQF_DISABLED is deprecated (a no-op on modern
+ * kernels), and plain (devm_)request_irq() is the conventional way to
+ * install a driver handler -- confirm why setup_irq() is used here.
+ */
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+/*
+ * Probe: map the controller registers, initialize the 4 channels
+ * (channel register windows start at offset 0x80), register the
+ * dmaengine device and install the interrupt handler.
+ *
+ * Fixes over the original:
+ *  - dma_base_addr was a 'static' function-local (wrong for a per-
+ *    device mapping and pointless here); now an ordinary local.
+ *  - irq_of_parse_and_map() returning 0 (failure) was not checked.
+ *  - the return values of dma_async_device_register() and setup_irq()
+ *    were ignored; the IRQ was installed even after a failed
+ *    registration.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "can't parse and map IRQ\n");
+		return -EINVAL;
+	}
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	/* register with the dmaengine core before enabling interrupts */
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	ret = setup_irq(irq, &moxart_dma_irq);
+	if (ret) {
+		dev_err(dev, "setup_irq failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return 0;
+}
+
+/* Driver removal: unregister from the dmaengine core. */
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+ /* NOTE(review): the interrupt installed with setup_irq() in probe is
+  * never torn down here -- confirm a remove_irq()/free_irq() is needed
+  * before this driver can be safely unbound. */
+ dma_async_device_unregister(&m->dma_slave);
+ return 0;
+}
+
+/* OF match table: binds to "moxa,moxart-dma" device-tree nodes. */
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+/* NOTE(review): no MODULE_DEVICE_TABLE(of, moxart_dma_match) -- module
+ * autoloading on OF match will not work; confirm whether intended. */
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+/*
+ * Registered at subsys_initcall time (earlier than module_init) --
+ * presumably so client drivers probing later can already request
+ * channels; confirm against the consumers (e.g. the MMC driver).
+ */
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL 4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE (1<<0)
+#define APB_DMA_FIN_INT_STS (1<<1)
+#define APB_DMA_FIN_INT_EN (1<<2)
+#define APB_DMA_BURST_MODE (1<<3)
+#define APB_DMA_ERR_INT_STS (1<<4)
+#define APB_DMA_ERR_INT_EN (1<<5)
+#define APB_DMA_SOURCE_AHB (1<<6)
+#define APB_DMA_SOURCE_APB 0
+#define APB_DMA_DEST_AHB (1<<7)
+#define APB_DMA_DEST_APB 0
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 (1<<8)
+#define APB_DMA_SOURCE_INC_2_8 (2<<8)
+#define APB_DMA_SOURCE_INC_4_16 (3<<8)
+#define APB_DMA_SOURCE_DEC_1_4 (5<<8)
+#define APB_DMA_SOURCE_DEC_2_8 (6<<8)
+#define APB_DMA_SOURCE_DEC_4_16 (7<<8)
+#define APB_DMA_SOURCE_INC_MASK (7<<8)
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 (1<<12)
+#define APB_DMA_DEST_INC_2_8 (2<<12)
+#define APB_DMA_DEST_INC_4_16 (3<<12)
+#define APB_DMA_DEST_DEC_1_4 (5<<12)
+#define APB_DMA_DEST_DEC_2_8 (6<<12)
+#define APB_DMA_DEST_DEC_4_16 (7<<12)
+#define APB_DMA_DEST_INC_MASK (7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK (15<<16)
+#define APB_DMA_DATA_WIDTH_MASK (3<<20)
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 (1<<20)
+#define APB_DMA_DATA_WIDTH_1 (2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK (15<<24)
+ unsigned int ul;
+
+ struct {
+
+#define APB_DMAB_ENABLE 1
+ /* enable DMA */
+ unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS 1
+ /* finished interrupt status */
+ unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN 1
+ /* finished interrupt enable */
+ unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE 1
+ /* burst mode */
+ unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS 1
+ /* error interrupt status */
+ unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN 1
+ /* error interrupt enable */
+ unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB 1
+#define APB_DMAB_SOURCE_APB 0
+ /* 0:APB (device), 1:AHB (RAM) */
+ unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB 1
+#define APB_DMAB_DEST_APB 0
+ /* 0:APB, 1:AHB */
+ unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0 0
+#define APB_DMAB_SOURCE_INC_1_4 1
+#define APB_DMAB_SOURCE_INC_2_8 2
+#define APB_DMAB_SOURCE_INC_4_16 3
+#define APB_DMAB_SOURCE_DEC_1_4 5
+#define APB_DMAB_SOURCE_DEC_2_8 6
+#define APB_DMAB_SOURCE_DEC_4_16 7
+#define APB_DMAB_SOURCE_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int source_inc:3;
+
+ unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0 0
+#define APB_DMAB_DEST_INC_1_4 1
+#define APB_DMAB_DEST_INC_2_8 2
+#define APB_DMAB_DEST_INC_4_16 3
+#define APB_DMAB_DEST_DEC_1_4 5
+#define APB_DMAB_DEST_DEC_2_8 6
+#define APB_DMAB_DEST_DEC_4_16 7
+#define APB_DMAB_DEST_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int dest_inc:3;
+
+ unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK 15
+ /*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK 3
+#define APB_DMAB_DATA_WIDTH_4 0
+#define APB_DMAB_DATA_WIDTH_2 1
+#define APB_DMAB_DATA_WIDTH_1 2
+ /*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+ unsigned int data_width:2;
+
+ unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK 15
+ /*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int source_req_no:4;
+
+ unsigned int reserved4:4;
+ } bits;
+};
+
+/* Per-channel register window; channels are laid out back to back. */
+struct moxart_dma_reg {
+ unsigned int source_addr;
+ unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ unsigned int cycles; /* transfer cycle count; meaning depends on burst mode */
+ union moxart_dma_reg_cfg cfg;
+};
+
+#endif
--
1.8.2.1

2013-07-29 13:44:20

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v3:

1. use BIT() macro in header file
2. use hardcoded masks in header file
3. include linux/bitops.h

device tree bindings document:
4. describe compatible variable "Must be" instead of "Should be"

Applies to next-20130729

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 478 +++++++++++++++++++++
drivers/dma/moxart-dma.h | 188 ++++++++
5 files changed, 693 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c
create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..f18f0fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..4f80a90
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,478 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ int error_flag;
+ struct moxart_dma_reg *reg;
+ void (*callback)(void *param);
+ void *callback_param;
+ struct completion dma_complete;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+ unsigned int data_width, data_inc;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ data_width = APB_DMAB_DATA_WIDTH_1;
+ data_inc = APB_DMAB_DEST_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ data_width = APB_DMAB_DATA_WIDTH_2;
+ data_inc = APB_DMAB_DEST_INC_2_8;
+ break;
+ default:
+ data_width = APB_DMAB_DATA_WIDTH_4;
+ data_inc = APB_DMAB_DEST_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+ mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.source_inc = data_inc;
+
+ mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+ mcfg.bits.source_req_no = 0;
+ } else {
+ mcfg.bits.data_width = data_width;
+ mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+ mcfg.bits.dest_inc = data_inc;
+ mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+ mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+ mcfg.bits.dest_req_no = 0;
+ mcfg.bits.source_req_no = mchan->cfg.slave_id;
+ }
+
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned long flags;
+ union moxart_dma_reg_cfg mcfg;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->source_addr);
+ writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+ */
+ writel(size, &mchan->reg->cycles);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+ __func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &moxart_driver.driver) {
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned int ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+ } else {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+ if (i == mchan->ch_num
+ && !mchan->allocated) {
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ mchan->allocated = false;
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ union moxart_dma_reg_cfg mcfg;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ mcfg.ul |= APB_DMA_ENABLE;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+ spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct device *dev = devid;
+ struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+ unsigned int i;
+ union moxart_dma_reg_cfg mcfg;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ mcfg.ul = readl(&mchan->reg->cfg.ul);
+ if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+ mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+ mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ dev_dbg(dev, "%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ mchan->error_flag = 0;
+ writel(mcfg.ul, &mchan->reg->cfg.ul);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res_dma;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, 0, &res_dma);
+ if (ret) {
+ dev_err(dev, "can't get DMA base resource\n");
+ return ret;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+
+ dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+ + i * sizeof(struct moxart_dma_reg));
+ mchan->callback = NULL;
+ mchan->allocated = 0;
+ mchan->callback_param = NULL;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, i, mchan->ch_num, mchan->reg);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ platform_set_drvdata(pdev, mdc);
+
+ moxart_dma_irq.dev_id = dev;
+ setup_irq(irq, &moxart_dma_irq);
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+ dma_async_device_unregister(&m->dma_slave);
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..a37b13f
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL 4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE BIT(0)
+#define APB_DMA_FIN_INT_STS BIT(1)
+#define APB_DMA_FIN_INT_EN BIT(2)
+#define APB_DMA_BURST_MODE BIT(3)
+#define APB_DMA_ERR_INT_STS BIT(4)
+#define APB_DMA_ERR_INT_EN BIT(5)
+#define APB_DMA_SOURCE_AHB BIT(6)
+#define APB_DMA_SOURCE_APB 0
+#define APB_DMA_DEST_AHB BIT(7)
+#define APB_DMA_DEST_APB 0
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+#define APB_DMA_SOURCE_INC_MASK 0x700
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+#define APB_DMA_DEST_INC_MASK 0x7000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+ unsigned int ul;
+
+ struct {
+
+#define APB_DMAB_ENABLE 1
+ /* enable DMA */
+ unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS 1
+ /* finished interrupt status */
+ unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN 1
+ /* finished interrupt enable */
+ unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE 1
+ /* burst mode */
+ unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS 1
+ /* error interrupt status */
+ unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN 1
+ /* error interrupt enable */
+ unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB 1
+#define APB_DMAB_SOURCE_APB 0
+ /* 0:APB (device), 1:AHB (RAM) */
+ unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB 1
+#define APB_DMAB_DEST_APB 0
+ /* 0:APB, 1:AHB */
+ unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0 0
+#define APB_DMAB_SOURCE_INC_1_4 1
+#define APB_DMAB_SOURCE_INC_2_8 2
+#define APB_DMAB_SOURCE_INC_4_16 3
+#define APB_DMAB_SOURCE_DEC_1_4 5
+#define APB_DMAB_SOURCE_DEC_2_8 6
+#define APB_DMAB_SOURCE_DEC_4_16 7
+#define APB_DMAB_SOURCE_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int source_inc:3;
+
+ unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0 0
+#define APB_DMAB_DEST_INC_1_4 1
+#define APB_DMAB_DEST_INC_2_8 2
+#define APB_DMAB_DEST_INC_4_16 3
+#define APB_DMAB_DEST_DEC_1_4 5
+#define APB_DMAB_DEST_DEC_2_8 6
+#define APB_DMAB_DEST_DEC_4_16 7
+#define APB_DMAB_DEST_INC_MASK 7
+ /*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+ unsigned int dest_inc:3;
+
+ unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK 15
+ /*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK 3
+#define APB_DMAB_DATA_WIDTH_4 0
+#define APB_DMAB_DATA_WIDTH_2 1
+#define APB_DMAB_DATA_WIDTH_1 2
+ /*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+ unsigned int data_width:2;
+
+ unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK 15
+ /*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+ unsigned int source_req_no:4;
+
+ unsigned int reserved4:4;
+ } bits;
+};
+
+struct moxart_dma_reg {
+ unsigned int source_addr;
+ unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ unsigned int cycles; /* depend on burst mode */
+ union moxart_dma_reg_cfg cfg;
+};
+
+#endif
--
1.8.2.1

2013-07-29 16:35:36

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver

On Monday 29 July 2013, Jonas Jensen wrote:

> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..f18f0fb
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : see dma.txt, should be 1
> +
> +Example:
> +
> + dma: dma@90500000 {
> + compatible = "moxa,moxart-dma";
> + reg = <0x90500000 0x1000>;
> + interrupts = <24 0>;
> + #dma-cells = <1>;
> + };

The binding should really define what the one cell in the dma specifier refers
to. For all I can tell, it is a hardcoded channel number, and each channel
corresponds to exactly one slave request line.

> +static DEFINE_SPINLOCK(dma_lock);

Can't this be part of the device structure? You should not need a global lock here.

> +struct moxart_dma_container {
> + int ctlr;
> + struct dma_device dma_slave;
> + struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_dma_container *mdc;

Same here. Also, you should never have global identifiers with just three characters.
Most of your 'static' variables are already prefixed "moxart_".

> +static int moxart_slave_config(struct dma_chan *chan,
> + struct dma_slave_config *cfg)
> +{
> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + union moxart_dma_reg_cfg mcfg;
> + unsigned long flags;
> + unsigned int data_width, data_inc;
> +
> + spin_lock_irqsave(&dma_lock, flags);
> +
> + memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
> +
> + mcfg.ul = readl(&mchan->reg->cfg.ul);
> + mcfg.bits.burst = APB_DMAB_BURST_MODE;
> +
> + switch (mchan->cfg.src_addr_width) {
> + case DMA_SLAVE_BUSWIDTH_1_BYTE:
> + data_width = APB_DMAB_DATA_WIDTH_1;
> + data_inc = APB_DMAB_DEST_INC_1_4;
> + break;
> + case DMA_SLAVE_BUSWIDTH_2_BYTES:
> + data_width = APB_DMAB_DATA_WIDTH_2;
> + data_inc = APB_DMAB_DEST_INC_2_8;
> + break;
> + default:
> + data_width = APB_DMAB_DATA_WIDTH_4;
> + data_inc = APB_DMAB_DEST_INC_4_16;
> + break;
> + }
> +
> + if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
> + mcfg.bits.data_width = data_width;
> + mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
> + mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
> + mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
> + mcfg.bits.source_inc = data_inc;
> +
> + mcfg.bits.dest_req_no = mchan->cfg.slave_id;
> + mcfg.bits.source_req_no = 0;

You must not override the "dest_req_no" and "source_req_no" in moxart_slave_config
since they are already set by the ->xlate() function and the driver calling
slave_config generally has no knowledge of what the slave id is.

> +static struct platform_driver moxart_driver;

Please reorder the symbols so you don't need the forward declaration.

> +bool moxart_filter_fn(struct dma_chan *chan, void *param)
> +{
> + if (chan->device->dev->driver == &moxart_driver.driver) {

No need to check the driver. What you want to check instead is that
the *device* matches.

> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + unsigned int ch_req = *(unsigned int *)param;
> + dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> + __func__, mchan, ch_req, mchan->ch_num);
> + return ch_req == mchan->ch_num;
> + } else {
> + dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> + __func__);
> + return false;
> + }
> +}
> +EXPORT_SYMBOL(moxart_filter_fn);

Don't export the filter function. No slave driver should rely on this, since you
have DT probing.


> diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
> new file mode 100644
> index 0000000..a37b13f
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.h

You don't need a separate file here, just move the contents into moxart-dma.c

> +union moxart_dma_reg_cfg {
> +
> +#define APB_DMA_ENABLE BIT(0)
> +#define APB_DMA_FIN_INT_STS BIT(1)
> +#define APB_DMA_FIN_INT_EN BIT(2)
> +#define APB_DMA_BURST_MODE BIT(3)
> +#define APB_DMA_ERR_INT_STS BIT(4)
> +#define APB_DMA_ERR_INT_EN BIT(5)
> +#define APB_DMA_SOURCE_AHB BIT(6)
> +#define APB_DMA_SOURCE_APB 0
> +#define APB_DMA_DEST_AHB BIT(7)
> +#define APB_DMA_DEST_APB 0
> +#define APB_DMA_SOURCE_INC_0 0
> +#define APB_DMA_SOURCE_INC_1_4 0x100
> +#define APB_DMA_SOURCE_INC_2_8 0x200
> +#define APB_DMA_SOURCE_INC_4_16 0x300
> +#define APB_DMA_SOURCE_DEC_1_4 0x500
> +#define APB_DMA_SOURCE_DEC_2_8 0x600
> +#define APB_DMA_SOURCE_DEC_4_16 0x700
> +#define APB_DMA_SOURCE_INC_MASK 0x700
> +#define APB_DMA_DEST_INC_0 0
> +#define APB_DMA_DEST_INC_1_4 0x1000
> +#define APB_DMA_DEST_INC_2_8 0x2000
> +#define APB_DMA_DEST_INC_4_16 0x3000
> +#define APB_DMA_DEST_DEC_1_4 0x5000
> +#define APB_DMA_DEST_DEC_2_8 0x6000
> +#define APB_DMA_DEST_DEC_4_16 0x7000
> +#define APB_DMA_DEST_INC_MASK 0x7000
> +#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
> +#define APB_DMA_DATA_WIDTH_MASK 0x300000
> +#define APB_DMA_DATA_WIDTH_4 0
> +#define APB_DMA_DATA_WIDTH_2 0x100000
> +#define APB_DMA_DATA_WIDTH_1 0x200000
> +#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
> + unsigned int ul;
> +
> + struct {
> +
> +#define APB_DMAB_ENABLE 1
> + /* enable DMA */
> + unsigned int enable:1;
> +
> +#define APB_DMAB_FIN_INT_STS 1
> + /* finished interrupt status */
> + unsigned int fin_int_sts:1;

The bit numbers don't actually match here if you build the kernel as
big-endian. You cannot use bitfields for hw data structures.

While you are here, get rid of the silly 'BIT' macro use as well.
Using hexadecimal literals is much clearer and you do that for
some fields anyway.

Arnd

2013-08-02 12:03:54

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v5] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v4:

1. use DT probing / remove EXPORT_SYMBOL(moxart_filter_fn)
2. remove struct moxart_dma_reg_cfg
3. refactor and use hex literals
4. moxart_dma_filter_fn(): compare device instead of driver
5. remove moxart-dma.h
6. move spinlock to moxart_dma_container
7. use u32 instead of unsigned int (registers)
8. use platform_get_resource()
9. remove use of BIT()

device tree bindings document:
10. describe single cell #dma-cells property

Applies to next-20130802

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 20 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 589 +++++++++++++++++++++
4 files changed, 617 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..9a4db43
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 1, this is a single cell used to
+ specify a channel number between 0-3
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..708c238
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,589 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+
+struct moxart_dma_reg {
+ u32 source_addr;
+ u32 dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ u32 cycles; /* depend on burst mode */
+ u32 ctrl;
+};
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ int error_flag;
+ struct moxart_dma_reg *reg;
+ void (*callback)(void *param);
+ void *callback_param;
+ struct completion dma_complete;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&c->dma_lock, flags);
+
+ ctrl = readl(&ch->reg->ctrl);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, &ch->reg->ctrl);
+
+ spin_unlock_irqrestore(&c->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->cfg.slave_id << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->cfg.slave_id << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->source_addr);
+ writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+ */
+ writel(size, &mchan->reg->cycles);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+ if (chan->device->dev == mc->dma_slave.dev) {
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned int ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+ } else {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+ .filter_fn = moxart_dma_filter_fn,
+};
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+ if (i == mchan->ch_num
+ && !mchan->allocated) {
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ mchan->allocated = false;
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= APB_DMA_ENABLE;
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = to_dma_container(devid);
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(&mchan->reg->ctrl);
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ pr_debug("%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ mchan->error_flag = 0;
+ writel(ctrl, &mchan->reg->ctrl);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+ + i * sizeof(struct moxart_dma_reg));
+ mchan->callback = NULL;
+ mchan->allocated = 0;
+ mchan->callback_param = NULL;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, i, mchan->ch_num, mchan->reg);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ platform_set_drvdata(pdev, mdc);
+
+ of_dma_controller_register(node, of_dma_simple_xlate, &moxart_dma_info);
+
+ moxart_dma_irq.dev_id = &mdc->dma_slave;
+ setup_irq(irq, &moxart_dma_irq);
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+ dma_async_device_unregister(&m->dma_slave);
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-08-02 12:28:30

by Jonas Jensen

[permalink] [raw]
Subject: Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver

Hi Arnd,

Thanks for the replies. I think what you mention should now be fixed.

Except one thing where I still have questions:

On 29 July 2013 18:35, Arnd Bergmann <[email protected]> wrote:
> You must not override the "source_req_no" and "dest_req_no" in moxart_slave_config
> since they are already set by the ->xlate() function and the driver calling
> slave_config generally has no knowledge of what the slave id is.

MMC now has a device tree node:

mmc: mmc@98e00000 {
compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
clocks = <&coreclk>;
dmas = <&dma 0>,
<&dma 1>;
dma-names = "tx", "rx";
};

.. where the driver requests channel 0-1 and sets cfg.slave_id =
APB_DMA_SD_REQ_NO for both.

Perhaps this is not how slave_id is intended to be used?

Maybe it would be more appropriate to have two DMA cells?

APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

Best regards,
Jonas

2013-08-02 13:29:23

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v6] dmaengine: Add MOXA ART DMA engine driver

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Preemptively submitting a new version that has the previously
mentioned two cell xlate.

Changes since v5:

1. add line request number and use two cell xlate

device tree bindings document:
2. update description, describe the two cells of #dma-cells

Applies to next-20130802

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 21 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 610 +++++++++++++++++++++
4 files changed, 639 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..dc2b686
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 2
+ cell index 0: channel number between 0-3
+ cell index 1: line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <2>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3ed270f
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,610 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself; it is supplied
+ * as the second #dma-cells specifier cell
+ * and stored in mchan->line by ->xlate()
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself; it is supplied
+ * as the second #dma-cells specifier cell
+ * and stored in mchan->line by ->xlate()
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+
+struct moxart_dma_reg {
+ u32 source_addr;
+ u32 dest_addr;
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+ u32 cycles; /* depend on burst mode */
+ u32 ctrl;
+};
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ int error_flag;
+ struct moxart_dma_reg *reg;
+ void (*callback)(void *param);
+ void *callback_param;
+ struct completion dma_complete;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+ unsigned int line;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&c->dma_lock, flags);
+
+ ctrl = readl(&ch->reg->ctrl);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, &ch->reg->ctrl);
+
+ spin_unlock_irqrestore(&c->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ mchan->callback = tx->callback;
+ mchan->callback_param = tx->callback_param;
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->reg);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->source_addr);
+ writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ &mchan->reg->dest_addr);
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+ */
+ writel(size, &mchan->reg->cycles);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+ if (chan->device->dev == mc->dma_slave.dev) {
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ unsigned int ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+ } else {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+ .filter_fn = moxart_dma_filter_fn,
+};
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_chan *chan;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ chan = dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+ if (chan)
+ to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+
+ return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+ if (i == mchan->ch_num
+ && !mchan->allocated) {
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ mchan->allocated = false;
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ ctrl = readl(&mchan->reg->ctrl);
+ ctrl |= APB_DMA_ENABLE;
+ writel(ctrl, &mchan->reg->ctrl);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Interrupt handler: for every allocated channel, acknowledge the
+ * finish/error status bits, complete the cookie on finish, latch errors
+ * in error_flag, and invoke the client callback.
+ *
+ * NOTE(review): the callback is invoked directly from hard-IRQ context;
+ * the dmaengine API requires callbacks to run from tasklet context
+ * (Documentation/crypto/async-tx-api.txt) -- needs a tasklet.
+ * NOTE(review): error_flag is unconditionally reset to 0 below, so the
+ * latched error is only visible to the callback invoked just above it.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = to_dma_container(devid);
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(&mchan->reg->ctrl);
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ if (mchan->callback) {
+ pr_debug("%s: call callback for mchan=%p\n",
+ __func__, mchan);
+ mchan->callback(mchan->callback_param);
+ }
+ mchan->error_flag = 0;
+ /* write back with STS bits cleared to ack the IRQ */
+ writel(ctrl, &mchan->reg->ctrl);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Static irqaction installed via setup_irq(); dev_id is filled in at
+ * probe time. NOTE(review): IRQF_DISABLED is deprecated (a no-op in
+ * recent kernels) -- consider request_irq()/devm_request_irq() instead.
+ */
+static struct irqaction moxart_dma_irq = {
+ .name = "moxart-dma-engine",
+ .flags = IRQF_DISABLED,
+ .handler = moxart_dma_interrupt,
+};
+
+/*
+ * Probe: map the controller registers, initialise the four channels,
+ * register the dmaengine device and the OF DMA translation, then hook
+ * up the interrupt.
+ *
+ * Fixes over the previous version:
+ *  - irq_of_parse_and_map() failure (returns 0) is now detected;
+ *  - dma_async_device_register() and setup_irq() return values checked;
+ *  - dma_base_addr is no longer function-static (would break a second
+ *    controller instance).
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ /* irq_of_parse_and_map() returns 0 on failure */
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ /* channel register banks start at offset 0x80, one per channel */
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+ + i * sizeof(struct moxart_dma_reg));
+ mchan->callback = NULL;
+ mchan->allocated = 0;
+ mchan->callback_param = NULL;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+ __func__, i, mchan->ch_num, mchan->reg);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, mdc);
+
+ of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
+
+ moxart_dma_irq.dev_id = &mdc->dma_slave;
+ ret = setup_irq(irq, &moxart_dma_irq);
+ if (ret) {
+ dev_err(dev, "setup_irq failed\n");
+ of_dma_controller_free(node);
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return 0;
+}
+
+/*
+ * Remove: tear down in reverse order of probe. The previous version
+ * leaked the OF DMA controller registration, leaving a dangling
+ * translation after unbind.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&m->dma_slave);
+
+ return 0;
+}
+
+/* DT match table; bound to the binding in moxa,moxart-dma.txt. */
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+/* Platform driver glue; matched either by name or via the OF table. */
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+/* subsys_initcall: the DMA engine must be up before client drivers probe */
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-08-02 13:51:39

by Russell King - ARM Linux

[permalink] [raw]
Subject: Re: [PATCH v6] dmaengine: Add MOXA ART DMA engine driver

On Fri, Aug 02, 2013 at 03:28:45PM +0200, Jonas Jensen wrote:
> +struct moxart_dma_chan {
> + struct dma_chan chan;
> + int ch_num;
> + bool allocated;
> + int error_flag;
> + struct moxart_dma_reg *reg;
> + void (*callback)(void *param);
> + void *callback_param;
> + struct completion dma_complete;

Is this completion used anywhere?

> + struct dma_slave_config cfg;
> + struct dma_async_tx_descriptor tx_desc;
> + unsigned int line;
> +};
...
> +static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
> + struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> + dma_cookie_t cookie;
> + u32 ctrl;
> + unsigned long flags;
> +
> + mchan->callback = tx->callback;
> + mchan->callback_param = tx->callback_param;

As 'mchan' contains the tx descriptor, I don't know why you feel that you
need to copy these.

> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> + struct moxart_dma_container *mc = to_dma_container(devid);
> + struct moxart_dma_chan *mchan = &mc->slave_chans[0];
> + unsigned int i;
> + u32 ctrl;
> +
> + pr_debug("%s\n", __func__);
> +
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> + if (mchan->allocated) {
> + ctrl = readl(&mchan->reg->ctrl);
> + if (ctrl & APB_DMA_FIN_INT_STS) {
> + ctrl &= ~APB_DMA_FIN_INT_STS;
> + dma_cookie_complete(&mchan->tx_desc);
> + }
> + if (ctrl & APB_DMA_ERR_INT_STS) {
> + ctrl &= ~APB_DMA_ERR_INT_STS;
> + mchan->error_flag = 1;
> + }
> + if (mchan->callback) {
> + pr_debug("%s: call callback for mchan=%p\n",
> + __func__, mchan);
> + mchan->callback(mchan->callback_param);

Calling the callback from interrupt context is not on.

2/ Specify a completion callback. The callback routine runs in tasklet
context if the offload engine driver supports interrupts, or it is
called in application context if the operation is carried out
synchronously in software.

That can be found in Documentation/crypto/async-tx-api.txt and applies
to all DMA engine implementations (which is the underlying implementation
of the async-tx API.)

2013-08-02 14:10:55

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH v6] dmaengine: Add MOXA ART DMA engine driver

On Fri, Aug 02, 2013 at 02:28:45PM +0100, Jonas Jensen wrote:
> Add dmaengine driver for MOXA ART SoCs.
>
> Signed-off-by: Jonas Jensen <[email protected]>
> ---
>
> Notes:
> Preemptively submitting a new version that has the previously
> mentioned two cell xlate.
>
> Changes since v5:
>
> 1. add line request number and use two cell xlate
>
> device tree bindings document:
> 2. update description, describe the two cells of #dma-cells
>
> Applies to next-20130802
>
> .../devicetree/bindings/dma/moxa,moxart-dma.txt | 21 +
> drivers/dma/Kconfig | 7 +
> drivers/dma/Makefile | 1 +
> drivers/dma/moxart-dma.c | 610 +++++++++++++++++++++
> 4 files changed, 639 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..dc2b686
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> + cell index 0: channel number between 0-3
> + cell index 1: line request number
> +
> +Example:
> +
> + dma: dma@90500000 {
> + compatible = "moxa,moxart-dma";
> + reg = <0x90500000 0x1000>;
> + interrupts = <24 0>;
> + #dma-cells = <1>;

This should be #dma-cells = <2>;

[...]

> +struct moxart_dma_reg {
> + u32 source_addr;
> + u32 dest_addr;
> +#define APB_DMA_CYCLES_MASK 0x00ffffff
> + u32 cycles; /* depend on burst mode */
> + u32 ctrl;
> +};

I'm not keen on relying on structs for register offsets, but at least
they're exact width u32s.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + int i;
> + bool found = false;
> +
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> + if (i == mchan->ch_num
> + && !mchan->allocated) {
> + dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> + __func__, mchan->ch_num);
> + mchan->allocated = true;
> + found = true;

Why not return 0 here...

> + break;
> + }
> + }


...and always return -ENODEV here?

That way you can also get rid of the found variable.

> +
> + if (!found)
> + return -ENODEV;
> +
> + return 0;
> +}

[...]

> +static struct irqaction moxart_dma_irq = {
> + .name = "moxart-dma-engine",
> + .flags = IRQF_DISABLED,
> + .handler = moxart_dma_interrupt,
> +};
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> + struct device *dev = &pdev->dev;
> + struct device_node *node = dev->of_node;
> + struct resource *res;
> + static void __iomem *dma_base_addr;
> + int ret, i;
> + unsigned int irq;
> + struct moxart_dma_chan *mchan;
> + struct moxart_dma_container *mdc;
> +
> + mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> + if (!mdc) {
> + dev_err(dev, "can't allocate DMA container\n");
> + return -ENOMEM;
> + }
> +
> + irq = irq_of_parse_and_map(node, 0);

What if this fails (where irq == 0)?.

> +
> + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> + dma_base_addr = devm_ioremap_resource(dev, res);
> + if (IS_ERR(dma_base_addr)) {
> + dev_err(dev, "devm_ioremap_resource failed\n");
> + return PTR_ERR(dma_base_addr);
> + }
> +
> + mdc->ctlr = pdev->id;
> + spin_lock_init(&mdc->dma_lock);
> +
> + dma_cap_zero(mdc->dma_slave.cap_mask);
> + dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> + moxart_dma_init(&mdc->dma_slave, dev);
> +
> + mchan = &mdc->slave_chans[0];
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> + mchan->ch_num = i;
> + mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> + + i * sizeof(struct moxart_dma_reg));
> + mchan->callback = NULL;
> + mchan->allocated = 0;
> + mchan->callback_param = NULL;
> +
> + dma_cookie_init(&mchan->chan);
> + mchan->chan.device = &mdc->dma_slave;
> + list_add_tail(&mchan->chan.device_node,
> + &mdc->dma_slave.channels);
> +
> + dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
> + __func__, i, mchan->ch_num, mchan->reg);
> + }
> +
> + ret = dma_async_device_register(&mdc->dma_slave);
> + platform_set_drvdata(pdev, mdc);
> +
> + of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> + moxart_dma_irq.dev_id = &mdc->dma_slave;
> + setup_irq(irq, &moxart_dma_irq);

What if this fails?

Is there any reason you can't use request_irq over setup_irq?

> +
> + dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> + return ret;
> +}

Thanks,
Mark.

2013-08-02 19:28:43

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver

On Friday 02 August 2013 14:28:28 Jonas Jensen wrote:
>
> On 29 July 2013 18:35, Arnd Bergmann <[email protected]> wrote:
> > You must not override the "dest_req_no" and "dest_req_no" in moxart_slave_config
> > since they are already set by the ->xlate() function and the driver calling
> > slave_config generally has no knowledge of what the slave id is.
>
> MMC now has a device tree node:
>
> mmc: mmc@98e00000 {
> compatible = "moxa,moxart-mmc";
> reg = <0x98e00000 0x5C>;
> interrupts = <5 0>;
> clocks = <&coreclk>;
> dmas = <&dma 0>,
> <&dma 1>;
> dma-names = "tx", "rx";
> };
>
> .. where the driver requests channel 0-1 and sets cfg.slave_id =
> APB_DMA_SD_REQ_NO for both.
>
> Perhaps this is not how slave_id is intended to be used?
>
> Maybe it would be more appropriate to have two DMA cells?
>
> APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

In most drivers, you can use any channel with any request line number
and let the dmaengine driver pick a channel while you pass just the
request line (slave id) in a single cell in DT. If this does not
work, using two cells is the best approach here.

Removing APB_DMA_SD_REQ_NO from the driver code is definitely the
right approach, since that number is not something specific to the
device, but to the way it is connected to the DMA engine, which
belongs into DT.

Arnd

2013-08-05 14:38:07

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v7] dmaengine: Add MOXA ART DMA engine driver

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Thanks for the replies.

Changes since v6:

1. move callback from interrupt context to tasklet
2. remove callback and callback_param, use those provided by tx_desc
3. don't rely on structs for register offsets
4. remove local bool "found" variable from moxart_alloc_chan_resources()
5. check return value of irq_of_parse_and_map
6. use devm_request_irq instead of setup_irq
7. elaborate commit message

device tree bindings document:
8. in the example, change "#dma-cells" to "<2>"

Applies to next-20130805

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 21 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 614 +++++++++++++++++++++
4 files changed, 643 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..5b9f82c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 2
+ cell index 0: channel number between 0-3
+ cell index 1: line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <2>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..7160cc3
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_ADDRESS_SOURCE 0
+#define REG_ADDRESS_DEST 4
+#define REG_CYCLES 8
+#define REG_CTRL 12
+#define REG_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * source address increment per transfer cycle:
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * destination address increment per transfer cycle:
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+/* Per-channel state; one of four fixed channels in the controller. */
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num; /* index into slave_chans[] */
+ bool allocated; /* claimed by alloc_chan_resources */
+ int error_flag; /* latched on APB_DMA_ERR_INT_STS */
+ void __iomem *base; /* this channel's register bank */
+ struct completion dma_complete; /* NOTE(review): never used; remove? */
+ struct dma_slave_config cfg; /* copy of the last slave_config */
+ struct dma_async_tx_descriptor tx_desc; /* single in-flight descriptor */
+ unsigned int line; /* request line from DT dma-spec cell 1 */
+};
+
+/* Engine-wide state: the dma_device plus its four channels. */
+struct moxart_dma_container {
+ int ctlr; /* controller id (pdev->id) */
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock; /* guards ctrl-register read-modify-write */
+ struct tasklet_struct tasklet; /* runs client callbacks after the IRQ */
+};
+
+/* Convenience: the struct device behind a dma_chan, for dev_dbg(). */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+/* container_of helper: dma_device -> moxart_dma_container. */
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* container_of helper: dma_chan -> moxart_dma_chan. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * Abort any activity on the channel: clear the enable bit and mask both
+ * the completion and error interrupts, under the engine lock.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *cont = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ u32 val;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&cont->dma_lock, flags);
+ val = readl(mchan->base + REG_CTRL);
+ val &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(val, mchan->base + REG_CTRL);
+ spin_unlock_irqrestore(&cont->dma_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Cache the slave configuration and program the channel control word:
+ * burst mode, data width, address increments for the memory side, and
+ * the request line number (mchan->line, from the DT dma-spec) on the
+ * device side.
+ *
+ * NOTE(review): only src_addr_width is consulted for the data width,
+ * for both directions -- confirm dst_addr_width never differs.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ /* pick data width and increment step; the memory side increments,
+ * the device (FIFO) side keeps a fixed address */
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ /* word-wide transfers (APB_DMA_DATA_WIDTH_4 == 0) */
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ /* AHB on the memory side, APB plus request line on the device side */
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch the supported commands
+ * (terminate, slave config) and reject everything else with -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+ return moxart_slave_config(chan,
+ (struct dma_slave_config *)arg);
+ default:
+ return -ENOSYS;
+ }
+}
+
+/*
+ * Assign a cookie and enable the channel's completion/error interrupts.
+ * The transfer itself is not started here: moxart_issue_pending() sets
+ * APB_DMA_ENABLE later. error_flag is reset so a previous failure does
+ * not leak into this transfer.
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ mchan->error_flag = 0;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->base=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->base);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+/*
+ * Program source/destination addresses and the cycle count for a slave
+ * transfer and initialise the channel's single descriptor.
+ *
+ * NOTE(review): only sgl[0] is programmed and sg_len is ignored, so a
+ * multi-entry scatterlist is silently truncated to its first entry.
+ * NOTE(review): sg_dma_address() already yields a DMA/bus address;
+ * passing it through virt_to_phys() looks wrong -- confirm on hardware.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_SOURCE);
+ writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 when 64 bytes are copied, i.e. one cycle copies 16 bytes
+ * (when data_width == APB_DMA_DATA_WIDTH_4)
+ */
+ writel(size, mchan->base + REG_CYCLES);
+
+ dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+/*
+ * Channel filter used by moxart_of_xlate(): match when the channel
+ * belongs to this engine and its number equals dma-spec cell 0.
+ *
+ * The previous version re-declared an inner 'mchan' shadowing the
+ * identical outer one; the guard-clause form removes the shadow.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned int ch_req;
+
+ if (chan->device->dev != mc->dma_slave.dev) {
+ dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+ __func__);
+ return false;
+ }
+
+ ch_req = *(unsigned int *)param;
+ dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+ __func__, mchan, ch_req, mchan->ch_num);
+ return ch_req == mchan->ch_num;
+}
+
+/* Handed to of_dma_controller_register(); consumed by moxart_of_xlate(). */
+static struct of_dma_filter_info moxart_dma_info = {
+ .filter_fn = moxart_dma_filter_fn,
+};
+
+/*
+ * Translate a two-cell DT dma-spec: cell 0 selects the channel number
+ * (matched by moxart_dma_filter_fn()), cell 1 is the request line,
+ * stored in mchan->line for later use by moxart_slave_config().
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_chan *chan;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ /* binding requires #dma-cells = <2> */
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ chan = dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+ if (chan)
+ to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+
+ return chan;
+}
+
+/*
+ * Claim the channel. The old loop over all APB_DMA_MAX_CHANNEL indices
+ * only ever compared i against the ch_num of the channel already in
+ * hand, so it reduces to a single availability check (review feedback).
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ if (mchan->allocated)
+ return -ENODEV;
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = true;
+
+ return 0;
+}
+
+/* Release the channel: simply mark it available again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, ch->ch_num);
+
+ ch->allocated = false;
+}
+
+/*
+ * Kick off the transfer prepared on this channel: set APB_DMA_ENABLE in
+ * its control register, under the engine lock.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *cont = to_dma_container(ch->chan.device);
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&cont->dma_lock, flags);
+ writel(readl(ch->base + REG_CTRL) | APB_DMA_ENABLE,
+ ch->base + REG_CTRL);
+ spin_unlock_irqrestore(&cont->dma_lock, flags);
+}
+
+/*
+ * Report the status of a cookie. dma_cookie_status() already yields
+ * everything this driver can know (there is no residue accounting), so
+ * the old "if (ret == DMA_SUCCESS || !txstate) return ret; return ret;"
+ * dead branch is collapsed into a single return (review feedback).
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * Wire up the dmaengine operations on @dma and initialise its (empty)
+ * channel list; the channels themselves are added by the probe routine.
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Deferred half of the interrupt handler: invoke client callbacks from
+ * tasklet context, as the dmaengine API requires.
+ *
+ * NOTE(review): every allocated channel's callback fires, not only the
+ * channels whose transfer actually completed -- verify clients tolerate
+ * spurious callbacks, or track completion per channel.
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+ struct moxart_dma_container *mc = (void *)data;
+ struct moxart_dma_chan *ch = &mc->slave_chans[0];
+ unsigned int i;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (ch->allocated && ch->tx_desc.callback) {
+ pr_debug("%s: call callback for ch=%p\n",
+ __func__, ch);
+ ch->tx_desc.callback(ch->tx_desc.callback_param);
+ }
+ }
+}
+
+/*
+ * Interrupt handler: acknowledge finish/error status on every allocated
+ * channel, complete the cookie on finish, latch errors, then defer the
+ * client callbacks to the tasklet.
+ *
+ * The previous version unconditionally reset error_flag right after
+ * latching it, so an error could never be observed by the tasklet or a
+ * status query; the reset belongs in moxart_tx_submit() (where it
+ * already happens) and is dropped here.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = devid;
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(mchan->base + REG_CTRL);
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error_flag = 1;
+ }
+ /* write back with STS bits cleared to ack the IRQ */
+ writel(ctrl, mchan->base + REG_CTRL);
+ }
+ }
+
+ tasklet_schedule(&mc->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the controller registers, initialise the four channels,
+ * register the dmaengine device and the OF DMA translation, then hook
+ * up the interrupt.
+ *
+ * Fixes over the previous version:
+ *  - dma_async_device_register() and devm_request_irq() return values
+ *    are now checked and the partially-registered state unwound;
+ *  - dma_base_addr is no longer function-static (would break a second
+ *    controller instance).
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ /* irq_of_parse_and_map() returns 0 on failure */
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ /* channel register banks start at offset 0x80, one per channel */
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+ mchan->allocated = 0;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
+ __func__, i, mchan->ch_num, mchan->base);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, mdc);
+
+ of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
+
+ tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ of_dma_controller_free(node);
+ dma_async_device_unregister(&mdc->dma_slave);
+ tasklet_kill(&mdc->tasklet);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+ return 0;
+}
+
+/*
+ * Remove: tear down in reverse order of probe. The previous version
+ * left the OF DMA controller registered and the tasklet alive after
+ * unbind.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+ tasklet_kill(&m->tasklet);
+ dma_async_device_unregister(&m->dma_slave);
+
+ return 0;
+}
+
+/* DT match table; bound to the binding in moxa,moxart-dma.txt. */
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+/* Platform driver glue; matched either by name or via the OF table. */
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+/* subsys_initcall: the DMA engine must be up before client drivers probe */
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-08-05 16:58:49

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH v7] dmaengine: Add MOXA ART DMA engine driver

On Mon, Aug 05, 2013 at 03:37:37PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <[email protected]>
> ---
>
> Notes:
> Thanks for the replies.
>
> Changes since v6:
>
> 1. move callback from interrupt context to tasklet
> 2. remove callback and callback_param, use those provided by tx_desc
> 3. don't rely on structs for register offsets
> 4. remove local bool "found" variable from moxart_alloc_chan_resources()
> 5. check return value of irq_of_parse_and_map
> 6. use devm_request_irq instead of setup_irq
> 7. elaborate commit message
>
> device tree bindings document:
> 8. in the example, change "#dma-cells" to "<2>"
>
> Applies to next-20130805
>
> .../devicetree/bindings/dma/moxa,moxart-dma.txt | 21 +
> drivers/dma/Kconfig | 7 +
> drivers/dma/Makefile | 1 +
> drivers/dma/moxart-dma.c | 614 +++++++++++++++++++++
> 4 files changed, 643 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..5b9f82c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> + cell index 0: channel number between 0-3
> + cell index 1: line request number
> +
> +Example:
> +
> + dma: dma@90500000 {
> + compatible = "moxa,moxart-dma";
> + reg = <0x90500000 0x1000>;
> + interrupts = <24 0>;
> + #dma-cells = <2>;
> + };

Thanks for the updates on this. :)

The binding and example look sensible to me; it would be nice if someone
familiar with the dma subsystem could check that this has the necessary
information.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + int i;
> +
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> + if (i == mchan->ch_num
> + && !mchan->allocated) {
> + dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> + __func__, mchan->ch_num);
> + mchan->allocated = true;
> + return 0;
> + }
> + }

Come to think of it, why do you need to iterate over all of the channels
to handle a particular channel number that you already know, and already
have the struct for?

I'm not familiar with the dma subsystem, and I couldn't spot when the
dma channel is actually assigned/selected prior to this.

[...]

> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> + dma_cookie_t cookie,
> + struct dma_tx_state *txstate)
> +{
> + enum dma_status ret;
> +
> + ret = dma_cookie_status(chan, cookie, txstate);
> + if (ret == DMA_SUCCESS || !txstate)
> + return ret;
> +
> + return ret;

No special status handling?

This function is equivalent to:

return dma_cookie_status(chan, cookie, txstate);

[...]

> +static int moxart_probe(struct platform_device *pdev)
> +{
> + struct device *dev = &pdev->dev;
> + struct device_node *node = dev->of_node;
> + struct resource *res;
> + static void __iomem *dma_base_addr;
> + int ret, i;
> + unsigned int irq;
> + struct moxart_dma_chan *mchan;
> + struct moxart_dma_container *mdc;
> +
> + mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> + if (!mdc) {
> + dev_err(dev, "can't allocate DMA container\n");
> + return -ENOMEM;
> + }
> +
> + irq = irq_of_parse_and_map(node, 0);
> + if (irq <= 0) {
> + dev_err(dev, "irq_of_parse_and_map failed\n");
> + return -EINVAL;
> + }
> +
> + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> + dma_base_addr = devm_ioremap_resource(dev, res);
> + if (IS_ERR(dma_base_addr)) {
> + dev_err(dev, "devm_ioremap_resource failed\n");
> + return PTR_ERR(dma_base_addr);
> + }
> +
> + mdc->ctlr = pdev->id;
> + spin_lock_init(&mdc->dma_lock);
> +
> + dma_cap_zero(mdc->dma_slave.cap_mask);
> + dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> + moxart_dma_init(&mdc->dma_slave, dev);
> +
> + mchan = &mdc->slave_chans[0];
> + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> + mchan->ch_num = i;
> + mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
> + mchan->allocated = 0;
> +
> + dma_cookie_init(&mchan->chan);
> + mchan->chan.device = &mdc->dma_slave;
> + list_add_tail(&mchan->chan.device_node,
> + &mdc->dma_slave.channels);
> +
> + dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
> + __func__, i, mchan->ch_num, mchan->base);
> + }
> +
> + ret = dma_async_device_register(&mdc->dma_slave);

What if this fails?

> + platform_set_drvdata(pdev, mdc);
> +
> + of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> + tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> + devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> + "moxart-dma-engine", mdc);

The return value of devm_request_irq should be checked; it might fail.

> +
> + dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> + return ret;
> +}

Thanks,
Mark.

2013-08-05 20:50:21

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [PATCH v7] dmaengine: Add MOXA ART DMA engine driver

On Monday 05 August 2013, Jonas Jensen wrote:

> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +
> + if (chan->device->dev == mc->dma_slave.dev) {

This comparison seems rather pointless -- you only check that the
device owning the channel is the same as the device that belongs
to channel's "container", which would naturally be the case.

What you don't check here is that it matches the device that was passed
to of_dma_controller_register().

> + struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> + unsigned int ch_req = *(unsigned int *)param;
> + dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> + __func__, mchan, ch_req, mchan->ch_num);
> + return ch_req == mchan->ch_num;
> + } else {
> + dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> + __func__);
> + return false;
> + }
> +}
> +
> +static struct of_dma_filter_info moxart_dma_info = {
> + .filter_fn = moxart_dma_filter_fn,
> +};
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> + struct of_dma *ofdma)
> +{
> + struct dma_chan *chan;
> + struct of_dma_filter_info *info = ofdma->of_dma_data;
> +
> + if (!info || !info->filter_fn)
> + return NULL;

This seems pointless too. Why do you pass a of_dma_filter_info pointer
as ofdma->of_dma_data? It's constant after all and you can just access
it a couple of lines higher.

> + if (dma_spec->args_count != 2)
> + return NULL;
> +
> + chan = dma_request_channel(info->dma_cap, info->filter_fn,
> + &dma_spec->args[0]);

The filter function is also constant. However, you need to pass the
device pointer here so the filter can compare it.

> + if (chan)
> + to_moxart_dma_chan(chan)->line = dma_spec->args[1];
> +
> + return chan;
> +}

There is still an open question here regarding whether or not the
channel number is actually required to be fixed or not. In most
dma engines, the channels are actually interchangeable, so you only
need to specify the request number, not the channel. Does this still
work if you just pick the first available channel?

Arnd

2013-08-06 12:39:38

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Add test dummy DMA channels to MMC, prove the controller
has support for interchangeable channel numbers [0].

Add new filter data struct, store dma_spec passed in xlate,
similar to proposed patch for omap/edma [1][2].

[0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
[1] https://lkml.org/lkml/2013/8/1/750 "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
[2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"

Changes since v7:

1. remove unnecessary loop in moxart_alloc_chan_resources()
2. remove unnecessary status check in moxart_tx_status()
3. check/handle dma_async_device_register() return value
4. check/handle devm_request_irq() return value
5. add and use filter data struct
6. check if channel device is the same as passed to
of_dma_controller_register()
7. add check if chan->device->dev->of_node is the same as
dma_spec->np (xlate)
8. support interchangeable channels, #dma-cells is now <1>

device tree bindings document:
9. update description and example, change "#dma-cells" to "<1>"

Applies to next-20130806

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 614 +++++++++++++++++++++
4 files changed, 641 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..69e7001
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500000 0x1000>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..36923cf
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_ADDRESS_SOURCE 0
+#define REG_ADDRESS_DEST 4
+#define REG_CYCLES 8
+#define REG_CTRL 12
+#define REG_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+/* Per-channel state; one instance for each of the four hardware channels. */
+struct moxart_dma_chan {
+	struct dma_chan chan;
+	int ch_num;			/* hardware channel index (0-3) */
+	bool allocated;			/* claimed via alloc_chan_resources */
+	int error_flag;			/* set when the error interrupt fired */
+	void __iomem *base;		/* channel register window */
+	struct completion dma_complete;
+	struct dma_slave_config cfg;	/* last DMA_SLAVE_CONFIG received */
+	struct dma_async_tx_descriptor tx_desc;
+	unsigned int line_reqno;	/* request line from the DT dma-spec */
+};
+
+/* Controller-wide state covering the dma_device and all four channels. */
+struct moxart_dma_container {
+	int ctlr;			/* platform device id */
+	struct dma_device dma_slave;
+	struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;		/* serialises channel register access */
+	struct tasklet_struct tasklet;	/* runs client callbacks */
+};
+
+/* Bundle passed to moxart_dma_filter_fn() through dma_request_channel(). */
+struct moxart_dma_filter_data {
+	struct moxart_dma_container *mdc;
+	struct of_phandle_args *dma_spec;
+};
+
+/* Return the struct device used for channel-scoped diagnostics. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Map a dma_device back to its enclosing container. */
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Map a generic dma_chan to the driver's per-channel state. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * Stop the channel: clear the enable bit and mask both the completion
+ * and error interrupts, under the controller lock.  Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	/* Read-modify-write so unrelated control bits are preserved. */
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Cache the peer's dma_slave_config and program the channel control
+ * register: burst mode, address increment step matching the configured
+ * bus width, APB/AHB side selection, and the handshake request line.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	/* Keep a copy; prep_slave_sg reads addresses and widths from it. */
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/*
+	 * Select data width and address increment on the memory side; the
+	 * device side address does not increment.
+	 * NOTE(review): the switch keys on src_addr_width for both
+	 * directions -- presumably dst_addr_width should be used for
+	 * DMA_DEV_TO_MEM; confirm against this driver's clients.
+	 */
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		/* Word transfers: data width field cleared to 00. */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	/*
+	 * MEM_TO_DEV: AHB source, APB destination, request line gates the
+	 * destination; DEV_TO_MEM is the mirror image.
+	 */
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch the supported control
+ * commands to their helpers; anything else is unimplemented (-ENOSYS).
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	if (cmd == DMA_TERMINATE_ALL) {
+		moxart_terminate_all(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+
+	return -ENOSYS;
+}
+
+/*
+ * tx_submit hook: assign a cookie to the descriptor and enable the
+ * channel's completion and error interrupts.  The transfer itself is
+ * started later by moxart_issue_pending() setting APB_DMA_ENABLE.
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	/* New submission: forget any error from the previous transfer. */
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program a slave transfer from the first scatterlist entry: source and
+ * destination addresses plus the cycle count derived from the entry
+ * length and bus width.
+ *
+ * NOTE(review): only sgl[0] is programmed and sg_len is ignored, so a
+ * multi-entry scatterlist is silently truncated -- confirm callers only
+ * ever pass a single entry.
+ * NOTE(review): virt_to_phys() is applied to sg_dma_address(), which is
+ * already a DMA/bus address from the DMA API; this looks wrong on any
+ * platform without a 1:1 mapping.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 when 64 bytes are copied, i.e. one cycle moves 16
+	 * bytes (when data_width == APB_DMAB_DATA_WIDTH_4).
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* Re-initialise the single embedded descriptor and hook submit. */
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * Filter used by moxart_of_xlate(): accept only channels that belong to
+ * this controller instance and whose device node matches the dma-spec,
+ * then latch the request line number from the spec's first cell.
+ *
+ * Returns proper bool values (the original returned int literals 0/1
+ * from a bool function).
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/*
+ * #dma-cells = <1>: the single cell holds the request line number.
+ * Channels are interchangeable, so any free channel that passes the
+ * filter above is handed out by dma_request_channel().
+ *
+ * The unused local "struct dma_chan *chan" from the previous version
+ * has been dropped.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/*
+ * Mark the channel as claimed; the hardware needs no per-channel setup
+ * here.  Use true for the bool "allocated" field (was the int 1).
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/*
+ * Release the channel back to the pool.  Use false for the bool
+ * "allocated" field (was the int 0).
+ */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = false;
+}
+
+/*
+ * Kick off the transfer programmed by prep_slave_sg()/tx_submit() by
+ * setting the channel enable bit.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+/*
+ * Report cookie progress straight from the dmaengine core bookkeeping;
+ * the hardware exposes no residue counter to add.
+ * NOTE(review): mchan->error_flag is never consulted here, so hardware
+ * errors are not surfaced as DMA_ERROR -- confirm whether intended.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/* Fill in the dmaengine callbacks shared by every channel. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg = moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+	dma->device_free_chan_resources = moxart_free_chan_resources;
+	dma->device_issue_pending = moxart_issue_pending;
+	dma->device_tx_status = moxart_tx_status;
+	dma->device_control = moxart_control;
+	dma->dev = dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Deferred half of the interrupt handler: invoke the client callback of
+ * every allocated channel.
+ * NOTE(review): callbacks fire for all allocated channels regardless of
+ * which one actually completed -- confirm clients tolerate spurious
+ * callbacks.
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated && ch->tx_desc.callback) {
+			pr_debug("%s: call callback for ch=%p\n",
+				 __func__, ch);
+			ch->tx_desc.callback(ch->tx_desc.callback_param);
+		}
+	}
+}
+
+/*
+ * Shared interrupt handler for all four channels: acknowledge the
+ * status bits of every allocated channel, complete finished cookies and
+ * defer client callbacks to the tasklet.
+ *
+ * Fix: the previous version unconditionally reset error_flag after the
+ * status checks, wiping out the error it had just recorded.  The flag
+ * is now only cleared at submit time (moxart_tx_submit).
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (!mchan->allocated)
+			continue;
+
+		ctrl = readl(mchan->base + REG_CTRL);
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			/* Clear the status bit in the value written back. */
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			dma_cookie_complete(&mchan->tx_desc);
+		}
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			mchan->error_flag = 1;
+		}
+		writel(ctrl, mchan->base + REG_CTRL);
+	}
+
+	tasklet_schedule(&mc->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map registers, initialise the four channels, register with the
+ * dmaengine core and the OF DMA framework, then request the IRQ.
+ *
+ * Fixes relative to the previous version:
+ * - drop the pointless "static" on the local iomem pointer (it made the
+ *   function non-reentrant for no benefit);
+ * - irq is unsigned, so test "!irq" instead of "irq <= 0"
+ *   (irq_of_parse_and_map() returns 0 on failure);
+ * - unwind of_dma_controller_register()/dma_async_device_register() and
+ *   kill the tasklet when devm_request_irq() fails.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register windows start at offset 0x80, 16 bytes apart. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+		mchan->allocated = false;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		goto err_free_of;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+
+err_free_of:
+	tasklet_kill(&mdc->tasklet);
+	of_dma_controller_free(node);
+err_unregister:
+	dma_async_device_unregister(&mdc->dma_slave);
+	return ret;
+}
+
+/*
+ * Remove: tear down in reverse probe order.  Previously neither the OF
+ * DMA registration was freed nor the tasklet killed (flagged in
+ * review); both are handled now.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+	/* Stop handing out channels before unregistering the engine. */
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+	tasklet_kill(&m->tasklet);
+	dma_async_device_unregister(&m->dma_slave);
+
+	return 0;
+}
+
+/* Device-tree compatible strings served by this driver. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+/* Platform driver glue; matched either by name or by the OF table. */
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Register at subsys_initcall time so the DMA engine is available
+ * before client drivers (e.g. MMC) probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-08-06 18:42:27

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver

On Tuesday 06 August 2013, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <[email protected]>

Looks all good to me now,

Reviewed-by: Arnd Bergmann <[email protected]>

2013-08-07 15:14:08

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver

On Tue, Aug 06, 2013 at 01:38:31PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <[email protected]>
> ---
>
> Notes:
> Add test dummy DMA channels to MMC, prove the controller
> has support for interchangeable channel numbers [0].
>
> Add new filter data struct, store dma_spec passed in xlate,
> similar to proposed patch for omap/edma [1][2].
>
> [0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
> [1] https://lkml.org/lkml/2013/8/1/750 "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
> [2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"
>
> Changes since v7:
>
> 1. remove unnecessary loop in moxart_alloc_chan_resources()
> 2. remove unnecessary status check in moxart_tx_status()
> 3. check/handle dma_async_device_register() return value
> 4. check/handle devm_request_irq() return value
> 5. add and use filter data struct
> 6. check if channel device is the same as passed to
> of_dma_controller_register()
> 7. add check if chan->device->dev->of_node is the same as
> dma_spec->np (xlate)
> 8. support interchangeable channels, #dma-cells is now <1>
>
> device tree bindings document:
> 9. update description and example, change "#dma-cells" to "<1>"
>
> Applies to next-20130806
>
> .../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
> drivers/dma/Kconfig | 7 +
> drivers/dma/Makefile | 1 +
> drivers/dma/moxart-dma.c | 614 +++++++++++++++++++++
> 4 files changed, 641 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..69e7001
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> + dma: dma@90500000 {
> + compatible = "moxa,moxart-dma";
> + reg = <0x90500000 0x1000>;
> + interrupts = <24 0>;
> + #dma-cells = <1>;
> + };

The binding looks sensible to me now, but I have a couple of (hopefully
final) questions on the probe failure path.

[...]

> +
> + ret = dma_async_device_register(&mdc->dma_slave);
> + if (ret) {
> + dev_err(dev, "dma_async_device_register failed\n");
> + return ret;
> + }
> +
> + ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> + if (ret) {
> + dev_err(dev, "of_dma_controller_register failed\n");
> + dma_async_device_unregister(&mdc->dma_slave);
> + return ret;
> + }
> +
> + platform_set_drvdata(pdev, mdc);
> +
> + tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> + ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> + "moxart-dma-engine", mdc);
> + if (ret) {
> + dev_err(dev, "devm_request_irq failed\n");

Do you not need calls to of_dma_controller_free and
dma_async_device_unregister here? I'm not all that familiar with the DMA
API, so maybe you don't.

> + return ret;
> + }
> +
> + dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> + return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> + struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);

Similarly, do you not need to call of_dma_controller free here?

> + dma_async_device_unregister(&m->dma_slave);
> + return 0;
> +}

Thanks,
Mark.

2013-10-07 13:14:21

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v9] dmaengine: Add MOXA ART DMA engine driver

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v8:

1. reorder probe path, of_dma_controller_register() now happens after devm_request_irq()
2. call of_dma_controller_free() on removal
3. set flag on error, return DMA_ERROR in device_tx_status()
4. move tasklet_init() to end of probe path
5. kill tasklet on removal
6. remove offset to base address (make it so DT includes offset)
7. update device tree bindings document example (modify register range to what is actually used)

Applies to next-20130927

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 651 +++++++++++++++++++++
4 files changed, 678 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500080 0x40>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..d418a16
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_ADDRESS_SOURCE 0
+#define REG_ADDRESS_DEST 4
+#define REG_CYCLES 8
+#define REG_CTRL 12
+#define REG_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ bool error;
+ void __iomem *base;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+ unsigned int line_reqno;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock;
+ struct tasklet_struct tasklet;
+};
+
+struct moxart_dma_filter_data {
+ struct moxart_dma_container *mdc;
+ struct of_phandle_args *dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&c->dma_lock, flags);
+
+ ctrl = readl(ch->base + REG_CTRL);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&c->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->base);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_SOURCE);
+ writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+ */
+ writel(size, mchan->base + REG_CYCLES);
+
+ dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+ mchan->error = 0;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_filter_data *fdata = param;
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+ chan->device->dev->of_node != fdata->dma_spec->np) {
+ dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+ return 0;
+ }
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+ __func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+ mchan->line_reqno = fdata->dma_spec->args[0];
+
+ return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct moxart_dma_container *mdc = ofdma->of_dma_data;
+ struct moxart_dma_filter_data fdata = {
+ .mdc = mdc,
+ };
+
+ if (dma_spec->args_count < 1)
+ return NULL;
+
+ fdata.dma_spec = dma_spec;
+
+ return dma_request_channel(mdc->dma_slave.cap_mask,
+ moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 1;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_ENABLE;
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txs)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ enum dma_status ret;
+
+ ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+ struct moxart_dma_container *mc = (void *)data;
+ struct moxart_dma_chan *ch = &mc->slave_chans[0];
+ struct dma_async_tx_descriptor *tx_desc;
+ unsigned int i;
+ enum dma_status s;
+ struct dma_tx_state txs;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (ch->allocated) {
+ tx_desc = &ch->tx_desc;
+
+ s = mc->dma_slave.device_tx_status(&ch->chan,
+ ch->chan.cookie,
+ &txs);
+
+ switch (s) {
+ case DMA_ERROR:
+ printk_ratelimited("%s: DMA error\n",
+ __func__);
+ break;
+ case DMA_SUCCESS:
+ break;
+ case DMA_IN_PROGRESS:
+ case DMA_PAUSED:
+ continue;
+ }
+
+ if (tx_desc->callback) {
+ pr_debug("%s: call callback for ch=%p\n",
+ __func__, ch);
+ tx_desc->callback(tx_desc->callback_param);
+ }
+ }
+ }
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = devid;
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(mchan->base + REG_CTRL);
+ pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error = 1;
+ }
+ /*
+ * bits must be cleared here, this function
+ * called in a loop if moved to tasklet
+ */
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ tasklet_schedule(&mc->tasklet);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+ mchan->allocated = 0;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+ __func__, i, mchan->ch_num, mchan->base);
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+ if (ret) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+ dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+ return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+ tasklet_kill(&m->tasklet);
+
+ dma_async_device_unregister(&m->dma_slave);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-10-07 13:42:42

by Jonas Jensen

[permalink] [raw]
Subject: Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver

Hi Mark,

Thanks for the replies. Please have a look at v9.

On 7 August 2013 17:13, Mark Rutland <[email protected]> wrote:
>> +
>> + ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
>> + "moxart-dma-engine", mdc);
>> + if (ret) {
>> + dev_err(dev, "devm_request_irq failed\n");
>
> Do you not need calls to of_dma_controller_free and
> dma_async_device_unregister here? I'm not all that familiar with the DMA
> API, so maybe you don't.

Yes. I see now, I should have moved both dma_async_device_register()
and of_dma_controller_register() to happen after devm_request_irq().
I'll include that in next version.

>> +static int moxart_remove(struct platform_device *pdev)
>> +{
>> + struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
>
> Similarly, do you not need to call of_dma_controller_free here?

Yes, this is now done.


Best regards,
Jonas

2013-10-07 14:10:57

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v9:

1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()

Applies to next-20130927

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 651 +++++++++++++++++++++
4 files changed, 678 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500080 0x40>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_ADDRESS_SOURCE 0
+#define REG_ADDRESS_DEST 4
+#define REG_CYCLES 8
+#define REG_CTRL 12
+#define REG_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ bool error;
+ void __iomem *base;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+ unsigned int line_reqno;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock;
+ struct tasklet_struct tasklet;
+};
+
+struct moxart_dma_filter_data {
+ struct moxart_dma_container *mdc;
+ struct of_phandle_args *dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&c->dma_lock, flags);
+
+ ctrl = readl(ch->base + REG_CTRL);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&c->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->base);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_SOURCE);
+ writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+ */
+ writel(size, mchan->base + REG_CYCLES);
+
+ dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+ mchan->error = 0;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_filter_data *fdata = param;
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+ chan->device->dev->of_node != fdata->dma_spec->np) {
+ dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+ return 0;
+ }
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+ __func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+ mchan->line_reqno = fdata->dma_spec->args[0];
+
+ return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct moxart_dma_container *mdc = ofdma->of_dma_data;
+ struct moxart_dma_filter_data fdata = {
+ .mdc = mdc,
+ };
+
+ if (dma_spec->args_count < 1)
+ return NULL;
+
+ fdata.dma_spec = dma_spec;
+
+ return dma_request_channel(mdc->dma_slave.cap_mask,
+ moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 1;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_ENABLE;
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txs)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ enum dma_status ret;
+
+ ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+ struct moxart_dma_container *mc = (void *)data;
+ struct moxart_dma_chan *ch = &mc->slave_chans[0];
+ struct dma_async_tx_descriptor *tx_desc;
+ unsigned int i;
+ enum dma_status s;
+ struct dma_tx_state txs;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (ch->allocated) {
+ tx_desc = &ch->tx_desc;
+
+ s = mc->dma_slave.device_tx_status(&ch->chan,
+ ch->chan.cookie,
+ &txs);
+
+ switch (s) {
+ case DMA_ERROR:
+ printk_ratelimited("%s: DMA error\n",
+ __func__);
+ break;
+ case DMA_SUCCESS:
+ break;
+ case DMA_IN_PROGRESS:
+ case DMA_PAUSED:
+ continue;
+ }
+
+ if (tx_desc->callback) {
+ pr_debug("%s: call callback for ch=%p\n",
+ __func__, ch);
+ tx_desc->callback(tx_desc->callback_param);
+ }
+ }
+ }
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = devid;
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(mchan->base + REG_CTRL);
+ pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error = 1;
+ }
+ /*
+ * bits must be cleared here, this function
+ * called in a loop if moved to tasklet
+ */
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ tasklet_schedule(&mc->tasklet);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+ mchan->allocated = 0;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+ __func__, i, mchan->ch_num, mchan->base);
+ }
+
+ platform_set_drvdata(pdev, mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+ if (ret) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+ dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+ return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+ tasklet_kill(&m->tasklet);
+
+ dma_async_device_unregister(&m->dma_slave);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-10-07 15:13:03

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver

[adding devicetree]

On Mon, Oct 07, 2013 at 03:10:34PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <[email protected]>
> ---
>
> Notes:
> Changes since v9:
>
> 1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
>
> Applies to next-20130927
>
> .../devicetree/bindings/dma/moxa,moxart-dma.txt | 19 +
> drivers/dma/Kconfig | 7 +
> drivers/dma/Makefile | 1 +
> drivers/dma/moxart-dma.c | 651 +++++++++++++++++++++
> 4 files changed, 678 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..79facce
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"

Sorry I didn't notice this previously, but "moxa" isn't in
Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
bindings using it). Could you cook up a separate patch to add an entry
for Moxa, please?

Also, given the SoC is called "ART" it's a shame that we're calling this
"moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
for "moxart" in bindings though, so changing that's likely to lead to
more problems.

> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number

Sorry for yet more pedantry, but could we instead have:

- interrupts: Should contain an interrupt-specifier for the sole
interrupt generated by the device.

> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> + dma: dma@90500000 {
> + compatible = "moxa,moxart-dma";
> + reg = <0x90500080 0x40>;
> + interrupts = <24 0>;
> + #dma-cells = <1>;
> + };

Otherwise I think the binding looks OK.

Thanks,
Mark

2013-10-08 08:43:42

by Jonas Jensen

[permalink] [raw]
Subject: [PATCH v11] dmaengine: Add MOXA ART DMA engine driver

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <[email protected]>
---

Notes:
Changes since v10:

device tree bindings document:
1. reformat interrupt description text

Applies to next-20130927

.../devicetree/bindings/dma/moxa,moxart-dma.txt | 20 +
drivers/dma/Kconfig | 7 +
drivers/dma/Makefile | 1 +
drivers/dma/moxart-dma.c | 651 +++++++++++++++++++++
4 files changed, 679 insertions(+)
create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..697e3f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain an interrupt-specifier for the sole
+ interrupt generated by the device
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+ dma: dma@90500000 {
+ compatible = "moxa,moxart-dma";
+ reg = <0x90500080 0x40>;
+ interrupts = <24 0>;
+ #dma-cells = <1>;
+ };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.

+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_ADDRESS_SOURCE 0
+#define REG_ADDRESS_DEST 4
+#define REG_CYCLES 8
+#define REG_CTRL 12
+#define REG_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE 0x1
+#define APB_DMA_FIN_INT_STS 0x2
+#define APB_DMA_FIN_INT_EN 0x4
+#define APB_DMA_BURST_MODE 0x8
+#define APB_DMA_ERR_INT_STS 0x10
+#define APB_DMA_ERR_INT_EN 0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_SOURCE_MASK 0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+
+#define APB_DMA_DEST 0x1000
+#define APB_DMA_DEST_MASK 0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4 (burst=1)
+ * 010: +2 (burst=0), +8 (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4 (burst=1)
+ * 110: -2 (burst=0), -8 (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0: no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+struct moxart_dma_chan {
+ struct dma_chan chan;
+ int ch_num;
+ bool allocated;
+ bool error;
+ void __iomem *base;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor tx_desc;
+ unsigned int line_reqno;
+};
+
+struct moxart_dma_container {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct moxart_dma_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ spinlock_t dma_lock;
+ struct tasklet_struct tasklet;
+};
+
+struct moxart_dma_filter_data {
+ struct moxart_dma_container *mdc;
+ struct of_phandle_args *dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+ return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&c->dma_lock, flags);
+
+ ctrl = readl(ch->base + REG_CTRL);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&c->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (mchan->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ default:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ }
+
+ if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (mchan->line_reqno << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = moxart_slave_config(chan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ dma_cookie_t cookie;
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+ __func__, mchan, mchan->ch_num, mchan->base);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ unsigned long flags;
+ unsigned int size, adr_width;
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_SOURCE);
+ writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.src_addr_width;
+ } else {
+ writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+ writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+ mchan->base + REG_ADDRESS_DEST);
+
+ adr_width = mchan->cfg.dst_addr_width;
+ }
+
+ size = sgl->length >> adr_width;
+
+ /*
+ * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+ * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+ */
+ writel(size, mchan->base + REG_CYCLES);
+
+ dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+ __func__, size, sgl->length, adr_width);
+
+ dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+ mchan->tx_desc.tx_submit = moxart_tx_submit;
+ mchan->error = 0;
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+ return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct moxart_dma_filter_data *fdata = param;
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+ chan->device->dev->of_node != fdata->dma_spec->np) {
+ dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+ return 0;
+ }
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+ __func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+ mchan->line_reqno = fdata->dma_spec->args[0];
+
+ return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct moxart_dma_container *mdc = ofdma->of_dma_data;
+ struct moxart_dma_filter_data fdata = {
+ .mdc = mdc,
+ };
+
+ if (dma_spec->args_count < 1)
+ return NULL;
+
+ fdata.dma_spec = dma_spec;
+
+ return dma_request_channel(mdc->dma_slave.cap_mask,
+ moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 1;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, mchan->ch_num);
+ mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+ struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+ u32 ctrl;
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+ spin_lock_irqsave(&mc->dma_lock, flags);
+
+ ctrl = readl(mchan->base + REG_CTRL);
+ ctrl |= APB_DMA_ENABLE;
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txs)
+{
+ struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+ enum dma_status ret;
+
+ ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+ struct moxart_dma_container *mc = (void *)data;
+ struct moxart_dma_chan *ch = &mc->slave_chans[0];
+ struct dma_async_tx_descriptor *tx_desc;
+ unsigned int i;
+ enum dma_status s;
+ struct dma_tx_state txs;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (ch->allocated) {
+ tx_desc = &ch->tx_desc;
+
+ s = mc->dma_slave.device_tx_status(&ch->chan,
+ ch->chan.cookie,
+ &txs);
+
+ switch (s) {
+ case DMA_ERROR:
+ printk_ratelimited("%s: DMA error\n",
+ __func__);
+ break;
+ case DMA_SUCCESS:
+ break;
+ case DMA_IN_PROGRESS:
+ case DMA_PAUSED:
+ continue;
+ }
+
+ if (tx_desc->callback) {
+ pr_debug("%s: call callback for ch=%p\n",
+ __func__, ch);
+ tx_desc->callback(tx_desc->callback_param);
+ }
+ }
+ }
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dma_container *mc = devid;
+ struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+ unsigned int i;
+ u32 ctrl;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ if (mchan->allocated) {
+ ctrl = readl(mchan->base + REG_CTRL);
+ pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ dma_cookie_complete(&mchan->tx_desc);
+ }
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ mchan->error = 1;
+ }
+ /*
+ * bits must be cleared here, this function
+ * called in a loop if moved to tasklet
+ */
+ writel(ctrl, mchan->base + REG_CTRL);
+
+ tasklet_schedule(&mc->tasklet);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_dma_chan *mchan;
+ struct moxart_dma_container *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr)) {
+ dev_err(dev, "devm_ioremap_resource failed\n");
+ return PTR_ERR(dma_base_addr);
+ }
+
+ mdc->ctlr = pdev->id;
+ spin_lock_init(&mdc->dma_lock);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ mchan = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+ mchan->ch_num = i;
+ mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+ mchan->allocated = 0;
+
+ dma_cookie_init(&mchan->chan);
+ mchan->chan.device = &mdc->dma_slave;
+ list_add_tail(&mchan->chan.device_node,
+ &mdc->dma_slave.channels);
+
+ dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+ __func__, i, mchan->ch_num, mchan->base);
+ }
+
+ platform_set_drvdata(pdev, mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+ if (ret) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+ dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+ return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+ tasklet_kill(&m->tasklet);
+
+ dma_async_device_unregister(&m->dma_slave);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <[email protected]>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1

2013-10-08 09:53:41

by Jonas Jensen

[permalink] [raw]
Subject: Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver

On 7 October 2013 17:12, Mark Rutland <[email protected]> wrote:
> Sorry I didn't notice this previously, but "moxa" isn't in
> Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> bindings using it). Could you cook up a separate patch to add an entry
> for Moxa, please?

Yes, I'll submit a separate patch.

> Also, given the SoC is called "ART" it's a shame that we're calling this
> "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> for "moxart" in bindings though, so changing that's likely to lead to
> more problems.

Sorry about that, I think the "moxart" contraction was suggested and
has been sticky ever since.

It's at least a little appropriate because the physical chip text
reads "MOXA ART" (photo):

https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg

Currently three drivers in linux-next use the name with accompanying
device tree bindings.
Considering the amount of patches required, can we keep the name, please?

> Sorry for yet more pedantry, but could we instead have:
>
> - interrupts: Should contain an interrupt-specifier for the sole
> interrupt generated by the device.

Fixed in v11.


Regards,
Jonas

2013-10-08 12:56:07

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver

On Tue, Oct 08, 2013 at 10:53:36AM +0100, Jonas Jensen wrote:
> On 7 October 2013 17:12, Mark Rutland <[email protected]> wrote:
> > Sorry I didn't notice this previously, but "moxa" isn't in
> > Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> > bindings using it). Could you cook up a separate patch to add an entry
> > for Moxa, please?
>
> Yes, I'll submit a separate patch.

Cheers.

>
> > Also, given the SoC is called "ART" it's a shame that we're calling this
> > "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> > for "moxart" in bindings though, so changing that's likely to lead to
> > more problems.
>
> Sorry about that, I think the "moxart" contraction was suggested and
> has been sticky ever since.
>
> It's at least a little appropriate because the physical chip text
> reads "MOXA ART" (photo):
>
> https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg
>
> Currently three drivers in linux-next use the name with accompanying
> device tree bindings.
> Considering the amount of patches required, can we keep the name, please?

Yeah, I think we have to keep it. It's not objectively wrong, and we
have other contractions (e.g. vexpress) in bindings. It just looks a bit
more odd than the others due to the repetition of "moxa". There's no
benefit to be had changing it now.

>
> > Sorry for yet more pedantry, but could we instead have:
> >
> > - interrupts: Should contain an interrupt-specifier for the sole
> > interrupt generated by the device.
>
> Fixed in v11.

Sounds good.

Cheers,
Mark.