2009-03-24 11:35:28

by Hans-Christian Egtvedt

Subject: [PATCH 1/2] dw_dmac: set CAP_PRIVATE capability for DW DMA controller

This patch adds the private capability for the DW DMA controller. This
allows the DMA controller to perform DMA transfers other than
memory-to-memory, for example when the DMA controller is used by
peripherals to transfer data between memory and a peripheral.

Signed-off-by: Hans-Christian Egtvedt <[email protected]>
---
drivers/dma/dw_dmac.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a97c07e..1c5e31d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -994,6 +994,7 @@ static int __init dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
dw->dma.dev = &pdev->dev;
--
1.5.6.3


2009-03-24 11:35:44

by Hans-Christian Egtvedt

Subject: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

This patch adds a cyclic DMA interface to the DW DMA driver. This is
very useful if you want to use the DMA controller in combination with a
sound device which uses cyclic buffers.

Using a DMA channel for cyclic DMA prevents it from being used as a
normal DMA engine until the user calls the cyclic free function on the
DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
is already active.
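
As an illustration (not part of the patch; helper names like
my_period_done and my_start_cyclic are made up), a minimal client of
this API could look like:

static void my_period_done(void *param)
{
	/* e.g. call snd_pcm_period_elapsed() here in an ALSA driver */
}

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
		size_t buf_len, size_t period_len, void *param)
{
	struct dw_cyclic_desc *cdesc;

	/* chan->private must point to a struct dw_dma_slave; buf is a
	 * DMA-mapped cyclic buffer, e.g. an ALSA ring buffer */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
			DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = my_period_done;	/* once per period */
	cdesc->period_callback_param = param;

	return dw_dma_cyclic_start(chan);
}

/* and when done:
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);	channel is a normal DMA engine again
 */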

Signed-off-by: Hans-Christian Egtvedt <[email protected]>
---
drivers/dma/dw_dmac.c | 303 +++++++++++++++++++++++++++++++++++++++++++-
drivers/dma/dw_dmac_regs.h | 7 +-
include/linux/dw_dmac.h | 19 +++
3 files changed, 327 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 1c5e31d..9e7160d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,84 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc);
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ u32 status_block, u32 status_err, u32 status_xfer)
+{
+ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+ if (callback) {
+ spin_unlock(&dwc->lock);
+ callback(callback_param);
+ spin_lock(&dwc->lock);
+ }
+ }
+
+ /*
+ * Error and transfer complete are highly unlikely, and will most
+ * likely be due to a configuration error by the user.
+ */
+ if (unlikely(status_err & dwc->mask) ||
+ unlikely(status_xfer & dwc->mask)) {
+ int i;
+
+ dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+ "interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ /* make sure DMA does not restart by loading a new list */
+ channel_writel(dwc, LLP, 0);
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ for (i = 0; i < dwc->cdesc->periods; i++)
+ dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+ return;
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +460,10 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
spin_lock(&dwc->lock);
- if (status_err & (1 << i))
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
+ else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
@@ -884,6 +965,226 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+ dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+ return -ENODEV;
+ }
+
+ spin_lock(&dwc->lock);
+
+ /* assert channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(chan2dev(&dwc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+ spin_unlock(&dwc->lock);
+ return -EBUSY;
+ }
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ /* setup DMAC channel registers */
+ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, CTL_HI, 0);
+
+ channel_set_bit(dw, CH_EN, dwc->mask);
+
+ spin_unlock(&dwc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_cyclic_desc *cdesc;
+ struct dw_cyclic_desc *retval = NULL;
+ struct dw_desc *desc;
+ struct dw_desc *last = NULL;
+ struct dw_dma_slave *dws = chan->private;
+ unsigned long was_cyclic;
+ unsigned int reg_width;
+ unsigned int periods;
+ unsigned int i;
+
+ spin_lock_bh(&dwc->lock);
+ if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+ spin_unlock_bh(&dwc->lock);
+ dev_dbg(chan2dev(&dwc->chan),
+ "queue and/or active list are not empty\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ if (was_cyclic) {
+ spin_unlock_bh(&dwc->lock);
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }
+ spin_unlock_bh(&dwc->lock);
+
+ retval = ERR_PTR(-EINVAL);
+ reg_width = dws->reg_width;
+ periods = buf_len / period_len;
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer. */
+ if (period_len > (DWC_MAX_COUNT << reg_width))
+ goto out_err;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto out_err;
+
+ retval = ERR_PTR(-ENOMEM);
+
+ if (periods > NR_DESCS_PER_CHANNEL)
+ goto out_err;
+
+ cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+ if (!cdesc)
+ goto out_err;
+
+ cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+ if (!cdesc->desc)
+ goto out_err_alloc;
+
+ for (i = 0; i < periods; i++) {
+ desc = dwc_desc_get(dwc);
+ if (!desc)
+ goto out_err_desc_get;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.dar = dws->tx_reg;
+ desc->lli.sar = buf_addr + (period_len * i);
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2P
+ | DWC_CTLL_INT_EN);
+ break;
+ case DMA_FROM_DEVICE:
+ desc->lli.dar = buf_addr + (period_len * i);
+ desc->lli.sar = dws->rx_reg;
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_FC_P2M
+ | DWC_CTLL_INT_EN);
+ break;
+ default:
+ break;
+ }
+
+ desc->lli.ctlhi = (period_len >> reg_width);
+ cdesc->desc[i] = desc;
+
+ if (last) {
+ last->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan),
+ last->txd.phys, sizeof(last->lli),
+ DMA_TO_DEVICE);
+ }
+
+ last = desc;
+ }
+
+ /* let's make a cyclic list */
+ last->lli.llp = cdesc->desc[0]->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+ sizeof(last->lli), DMA_TO_DEVICE);
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+ "period %zu periods %d\n", buf_addr, buf_len,
+ period_len, periods);
+
+ cdesc->periods = periods;
+ dwc->cdesc = cdesc;
+
+ return cdesc;
+
+out_err_desc_get:
+ while (i--)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+ kfree(cdesc);
+out_err:
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_cyclic_desc *cdesc = dwc->cdesc;
+ int i;
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+ if (!cdesc)
+ return;
+
+ spin_lock_bh(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ for (i = 0; i < cdesc->periods; i++)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+
+ kfree(cdesc->desc);
+ kfree(cdesc);
+
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b20..13a5807 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {

#define DW_REGLEN 0x400

+enum dw_dmac_flags {
+ DW_DMA_IS_CYCLIC = 0,
+};
+
struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
spinlock_t lock;

/* these other elements are all protected by lock */
+ unsigned long flags;
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ struct dw_cyclic_desc *cdesc;

unsigned int descs_allocated;
};
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
return container_of(chan, struct dw_dma_chan, chan);
}

-
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde..c8aad71 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */

+/* DMA API extensions */
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
#endif /* DW_DMAC_H */
--
1.5.6.3

2009-03-31 14:02:16

by Sosnowski, Maciej

Subject: RE: [PATCH 1/2] dw_dmac: set CAP_PRIVATE capability for DW DMA controller

Hans-Christian Egtvedt wrote:
> This patch adds the private capability for the DW DMA controller. This
> allows the DMA controller to perform DMA transfers other than
> memory-to-memory, for example when the DMA controller is used by
> peripherals to transfer data between memory and a peripheral.
>
> Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> ---
> drivers/dma/dw_dmac.c | 1 +
> 1 files changed, 1 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
> index a97c07e..1c5e31d 100644
> --- a/drivers/dma/dw_dmac.c
> +++ b/drivers/dma/dw_dmac.c
> @@ -994,6 +994,7 @@ static int __init dw_probe(struct platform_device *pdev)
> channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
> channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
>
> + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
> dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
> dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
> dw->dma.dev = &pdev->dev;

Hi Hans,

Adding Dan...

Please note that this way you permanently exclude the device from public use.
If that's your intent, that's fine.
Remember however that you can leave the device generally non-private
and attempt as needed to allocate a channel for exclusive usage by dma_request_channel()
- it would set the DMA_PRIVATE flag if a private channel candidate has been found.
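
For reference, a minimal sketch of that alternative (the filter
criterion below is made up; dma_request_channel() and the filter
prototype are the 2.6.29 dmaengine API):

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* hypothetical criterion: match the DW DMAC platform device */
	return chan->device->dev == param;
}

static struct dma_chan *my_grab_channel(struct device *dmac_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* the core sets DMA_PRIVATE on the device once a candidate is found */
	return dma_request_channel(mask, my_filter, dmac_dev);
}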

With this note:

Acked-by: Maciej Sosnowski <[email protected]>

Regards,
Maciej-

2009-03-31 14:02:38

by Sosnowski, Maciej

Subject: RE: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

Hans-Christian Egtvedt wrote:
> This patch adds a cyclic DMA interface to the DW DMA driver. This is
> very useful if you want to use the DMA controller in combination with a
> sound device which uses cyclic buffers.
>
> Using a DMA channel for cyclic DMA prevents it from being used as a
> normal DMA engine until the user calls the cyclic free function on the
> DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
> is already active.
>
> Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> ---
> drivers/dma/dw_dmac.c | 303 +++++++++++++++++++++++++++++++++++++++++++-
> drivers/dma/dw_dmac_regs.h | 7 +-
> include/linux/dw_dmac.h | 19 +++
> 3 files changed, 327 insertions(+), 2 deletions(-)
>

Adding Dan...

> diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
> index 1c5e31d..9e7160d 100644
> --- a/drivers/dma/dw_dmac.c
> +++ b/drivers/dma/dw_dmac.c
> @@ -363,6 +363,84 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
> dwc_descriptor_complete(dwc, bad_desc);
> }
>
> +/* --------------------- Cyclic DMA API extensions -------------------- */
> +
> +inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + return channel_readl(dwc, SAR);
> +}
> +EXPORT_SYMBOL(dw_dma_get_src_addr);
> +
> +inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + return channel_readl(dwc, DAR);
> +}
> +EXPORT_SYMBOL(dw_dma_get_dst_addr);
> +
> +/* called with dwc->lock held and all interrupts disabled */
> +static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
> + u32 status_block, u32 status_err, u32 status_xfer)
> +{
> + if (status_block & dwc->mask) {
> + void (*callback)(void *param);
> + void *callback_param;
> +
> + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
> + channel_readl(dwc, LLP));
> + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
> +
> + callback = dwc->cdesc->period_callback;
> + callback_param = dwc->cdesc->period_callback_param;
> + if (callback) {
> + spin_unlock(&dwc->lock);
> + callback(callback_param);
> + spin_lock(&dwc->lock);
> + }
> + }
> +
> + /*
> + * Error and transfer complete are highly unlikely, and will most
> + * likely be due to a configuration error by the user.
> + */
> + if (unlikely(status_err & dwc->mask) ||
> + unlikely(status_xfer & dwc->mask)) {
> + int i;
> +
> + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
> + "interrupt, stopping DMA transfer\n",
> + status_xfer ? "xfer" : "error");
> + dev_err(chan2dev(&dwc->chan),
> + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
> + channel_readl(dwc, SAR),
> + channel_readl(dwc, DAR),
> + channel_readl(dwc, LLP),
> + channel_readl(dwc, CTL_HI),
> + channel_readl(dwc, CTL_LO));
> +
> + channel_clear_bit(dw, CH_EN, dwc->mask);
> + while (dma_readl(dw, CH_EN) & dwc->mask)
> + cpu_relax();
> +
> + /* make sure DMA does not restart by loading a new list */
> + channel_writel(dwc, LLP, 0);
> + channel_writel(dwc, CTL_LO, 0);
> + channel_writel(dwc, CTL_HI, 0);
> +
> + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
> + dma_writel(dw, CLEAR.ERROR, dwc->mask);
> + dma_writel(dw, CLEAR.XFER, dwc->mask);
> +
> + for (i = 0; i < dwc->cdesc->periods; i++)
> + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
> +
> + return;
> + }
> +}
> +
> +/* ------------------------------------------------------------------------- */
> +
> static void dw_dma_tasklet(unsigned long data)
> {
> struct dw_dma *dw = (struct dw_dma *)data;
> @@ -382,7 +460,10 @@ static void dw_dma_tasklet(unsigned long data)
> for (i = 0; i < dw->dma.chancnt; i++) {
> dwc = &dw->chan[i];
> spin_lock(&dwc->lock);
> - if (status_err & (1 << i))
> + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
> + dwc_handle_cyclic(dw, dwc, status_block, status_err,
> + status_xfer);
> + else if (status_err & (1 << i))
> dwc_handle_error(dw, dwc);
> else if ((status_block | status_xfer) & (1 << i))
> dwc_scan_descriptors(dw, dwc);
> @@ -884,6 +965,226 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
> dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
> }
>
> +/* --------------------- Cyclic DMA API extensions -------------------- */
> +
> +int dw_dma_cyclic_start(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> +
> + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
> + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
> + return -ENODEV;
> + }
> +
> + spin_lock(&dwc->lock);
> +
> + /* assert channel is idle */
> + if (dma_readl(dw, CH_EN) & dwc->mask) {
> + dev_err(chan2dev(&dwc->chan),
> + "BUG: Attempted to start non-idle channel\n");
> + dev_err(chan2dev(&dwc->chan),
> + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
> + channel_readl(dwc, SAR),
> + channel_readl(dwc, DAR),
> + channel_readl(dwc, LLP),
> + channel_readl(dwc, CTL_HI),
> + channel_readl(dwc, CTL_LO));
> + spin_unlock(&dwc->lock);
> + return -EBUSY;
> + }
> +
> + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
> + dma_writel(dw, CLEAR.ERROR, dwc->mask);
> + dma_writel(dw, CLEAR.XFER, dwc->mask);
> +
> + /* setup DMAC channel registers */
> + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
> + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
> + channel_writel(dwc, CTL_HI, 0);
> +
> + channel_set_bit(dw, CH_EN, dwc->mask);
> +
> + spin_unlock(&dwc->lock);
> +
> + return 0;
> +}
> +EXPORT_SYMBOL(dw_dma_cyclic_start);
> +
> +void dw_dma_cyclic_stop(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> +
> + channel_clear_bit(dw, CH_EN, dwc->mask);
> + while (dma_readl(dw, CH_EN) & dwc->mask)
> + cpu_relax();
> +}
> +EXPORT_SYMBOL(dw_dma_cyclic_stop);
> +
> +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
> + dma_addr_t buf_addr, size_t buf_len, size_t period_len,
> + enum dma_data_direction direction)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + struct dw_cyclic_desc *cdesc;
> + struct dw_cyclic_desc *retval = NULL;
> + struct dw_desc *desc;
> + struct dw_desc *last = NULL;
> + struct dw_dma_slave *dws = chan->private;
> + unsigned long was_cyclic;
> + unsigned int reg_width;
> + unsigned int periods;
> + unsigned int i;
> +
> + spin_lock_bh(&dwc->lock);
> + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
> + spin_unlock_bh(&dwc->lock);
> + dev_dbg(chan2dev(&dwc->chan),
> + "queue and/or active list are not empty\n");
> + return ERR_PTR(-EBUSY);
> + }
> +
> + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> + if (was_cyclic) {
> + spin_unlock_bh(&dwc->lock);
> + dev_dbg(chan2dev(&dwc->chan),
> + "channel already prepared for cyclic DMA\n");
> + return ERR_PTR(-EBUSY);
> + }
> + spin_unlock_bh(&dwc->lock);
> +
> + retval = ERR_PTR(-EINVAL);
> + reg_width = dws->reg_width;
> + periods = buf_len / period_len;
> +
> + /* Check for too big/unaligned periods and unaligned DMA buffer. */
> + if (period_len > (DWC_MAX_COUNT << reg_width))
> + goto out_err;
> + if (unlikely(period_len & ((1 << reg_width) - 1)))
> + goto out_err;
> + if (unlikely(buf_addr & ((1 << reg_width) - 1)))
> + goto out_err;
> + if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
> + goto out_err;
> +
> + retval = ERR_PTR(-ENOMEM);
> +
> + if (periods > NR_DESCS_PER_CHANNEL)
> + goto out_err;
> +
> + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
> + if (!cdesc)
> + goto out_err;
> +
> + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
> + if (!cdesc->desc)
> + goto out_err_alloc;
> +
> + for (i = 0; i < periods; i++) {
> + desc = dwc_desc_get(dwc);
> + if (!desc)
> + goto out_err_desc_get;
> +
> + switch (direction) {
> + case DMA_TO_DEVICE:
> + desc->lli.dar = dws->tx_reg;
> + desc->lli.sar = buf_addr + (period_len * i);
> + desc->lli.ctllo = (DWC_DEFAULT_CTLLO
> + | DWC_CTLL_DST_WIDTH(reg_width)
> + | DWC_CTLL_SRC_WIDTH(reg_width)
> + | DWC_CTLL_DST_FIX
> + | DWC_CTLL_SRC_INC
> + | DWC_CTLL_FC_M2P
> + | DWC_CTLL_INT_EN);
> + break;
> + case DMA_FROM_DEVICE:
> + desc->lli.dar = buf_addr + (period_len * i);
> + desc->lli.sar = dws->rx_reg;
> + desc->lli.ctllo = (DWC_DEFAULT_CTLLO
> + | DWC_CTLL_SRC_WIDTH(reg_width)
> + | DWC_CTLL_DST_WIDTH(reg_width)
> + | DWC_CTLL_DST_INC
> + | DWC_CTLL_SRC_FIX
> + | DWC_CTLL_FC_P2M
> + | DWC_CTLL_INT_EN);
> + break;
> + default:
> + break;
> + }
> +
> + desc->lli.ctlhi = (period_len >> reg_width);
> + cdesc->desc[i] = desc;
> +
> + if (last) {
> + last->lli.llp = desc->txd.phys;
> + dma_sync_single_for_device(chan2parent(chan),
> + last->txd.phys, sizeof(last->lli),
> + DMA_TO_DEVICE);
> + }
> +
> + last = desc;
> + }
> +
> + /* let's make a cyclic list */
> + last->lli.llp = cdesc->desc[0]->txd.phys;
> + dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
> + sizeof(last->lli), DMA_TO_DEVICE);
> +
> + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
> + "period %zu periods %d\n", buf_addr, buf_len,
> + period_len, periods);
> +
> + cdesc->periods = periods;
> + dwc->cdesc = cdesc;
> +
> + return cdesc;
> +
> +out_err_desc_get:
> + while (i--)
> + dwc_desc_put(dwc, cdesc->desc[i]);
> +out_err_alloc:
> + kfree(cdesc);
> +out_err:
> + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> + return (struct dw_cyclic_desc *)retval;
> +}
> +EXPORT_SYMBOL(dw_dma_cyclic_prep);
> +
> +void dw_dma_cyclic_free(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> + struct dw_cyclic_desc *cdesc = dwc->cdesc;
> + int i;
> +
> + dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
> +
> + if (!cdesc)
> + return;
> +
> + spin_lock_bh(&dwc->lock);
> +
> + channel_clear_bit(dw, CH_EN, dwc->mask);
> + while (dma_readl(dw, CH_EN) & dwc->mask)
> + cpu_relax();
> +
> + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
> + dma_writel(dw, CLEAR.ERROR, dwc->mask);
> + dma_writel(dw, CLEAR.XFER, dwc->mask);
> +
> + spin_unlock_bh(&dwc->lock);
> +
> + for (i = 0; i < cdesc->periods; i++)
> + dwc_desc_put(dwc, cdesc->desc[i]);
> +
> + kfree(cdesc->desc);
> + kfree(cdesc);
> +
> + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> +}
> +EXPORT_SYMBOL(dw_dma_cyclic_free);
> +
> /*----------------------------------------------------------------------*/
>
> static void dw_dma_off(struct dw_dma *dw)
> diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
> index b252b20..13a5807 100644
> --- a/drivers/dma/dw_dmac_regs.h
> +++ b/drivers/dma/dw_dmac_regs.h
> @@ -126,6 +126,10 @@ struct dw_dma_regs {
>
> #define DW_REGLEN 0x400
>
> +enum dw_dmac_flags {
> + DW_DMA_IS_CYCLIC = 0,
> +};
> +
> struct dw_dma_chan {
> struct dma_chan chan;
> void __iomem *ch_regs;
> @@ -134,10 +138,12 @@ struct dw_dma_chan {
> spinlock_t lock;
>
> /* these other elements are all protected by lock */
> + unsigned long flags;
> dma_cookie_t completed;
> struct list_head active_list;
> struct list_head queue;
> struct list_head free_list;
> + struct dw_cyclic_desc *cdesc;
>
> unsigned int descs_allocated;
> };
> @@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
> return container_of(chan, struct dw_dma_chan, chan);
> }
>
> -
> struct dw_dma {
> struct dma_device dma;
> void __iomem *regs;
> diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
> index d797dde..c8aad71 100644
> --- a/include/linux/dw_dmac.h
> +++ b/include/linux/dw_dmac.h
> @@ -74,4 +74,23 @@ struct dw_dma_slave {
> #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
> #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
>
> +/* DMA API extensions */
> +struct dw_cyclic_desc {
> + struct dw_desc **desc;
> + unsigned long periods;
> + void (*period_callback)(void *param);
> + void *period_callback_param;
> +};
> +
> +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
> + dma_addr_t buf_addr, size_t buf_len, size_t period_len,
> + enum dma_data_direction direction);
> +void dw_dma_cyclic_free(struct dma_chan *chan);
> +int dw_dma_cyclic_start(struct dma_chan *chan);
> +void dw_dma_cyclic_stop(struct dma_chan *chan);
> +
> +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
> +
> +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
> +
> #endif /* DW_DMAC_H */

2009-03-31 14:03:42

by Sosnowski, Maciej

Subject: RE: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

Hans-Christian Egtvedt wrote:
> This patch adds a cyclic DMA interface to the DW DMA driver. This is
> very useful if you want to use the DMA controller in combination with a
> sound device which uses cyclic buffers.
>
> Using a DMA channel for cyclic DMA prevents it from being used as a
> normal DMA engine until the user calls the cyclic free function on the
> DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
> is already active.
>
> Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> ---
> drivers/dma/dw_dmac.c | 303 +++++++++++++++++++++++++++++++++++++++++++-
> drivers/dma/dw_dmac_regs.h | 7 +-
> include/linux/dw_dmac.h | 19 +++
> 3 files changed, 327 insertions(+), 2 deletions(-)
>

The patch looks good to me.
Just a few remarks below:

> + for (i = 0; i < dwc->cdesc->periods; i++)
> + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
> +
> + return;
> + }
> +}

This return is not needed

> +void dw_dma_cyclic_stop(struct dma_chan *chan)
> +{
> + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> +
> + channel_clear_bit(dw, CH_EN, dwc->mask);
> + while (dma_readl(dw, CH_EN) & dwc->mask)
> + cpu_relax();
> +}

Don't you need locks in dw_dma_cyclic_stop?

> + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> + if (was_cyclic) {
> + spin_unlock_bh(&dwc->lock);
> + dev_dbg(chan2dev(&dwc->chan),
> + "channel already prepared for cyclic DMA\n");
> + return ERR_PTR(-EBUSY);
> + }
> + spin_unlock_bh(&dwc->lock);

In this case both spin_unlock_bh() calls could be replaced by a single one called before "if (was_cyclic)":

+ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ spin_unlock_bh(&dwc->lock);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }

Regards,
Maciej-

2009-04-01 05:04:58

by Hans-Christian Egtvedt

Subject: Re: [PATCH 1/2] dw_dmac: set CAP_PRIVATE capability for DW DMA controller

On Tue, 31 Mar 2009 15:01:49 +0100
"Sosnowski, Maciej" <[email protected]> wrote:

> Hans-Christian Egtvedt wrote:
> > This patch adds the private capability for the DW DMA controller.
> > This allows the DMA controller to perform DMA transfers other than
> > memory-to-memory, for example when the DMA controller is used by
> > peripherals to transfer data between memory and a peripheral.
> >
> > Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> > ---
> > drivers/dma/dw_dmac.c | 1 +
> > 1 files changed, 1 insertions(+), 0 deletions(-)
> >
> > diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
> > index a97c07e..1c5e31d 100644
> > --- a/drivers/dma/dw_dmac.c
> > +++ b/drivers/dma/dw_dmac.c
> > @@ -994,6 +994,7 @@ static int __init dw_probe(struct platform_device *pdev)
> > channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
> > channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
> >
> > + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
> > dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
> > dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
> > dw->dma.dev = &pdev->dev;
>
> Hi Hans,
>
> Adding Dan...
>
> Please note that this way you permanently exclude the device from
> public use. If that's your intent, that's fine.
>

No, my intention was to be able to use dma_request_channel in the ALSA
driver I was working on in parallel.

> Remember however that you can leave the device generally non-private
> and attempt as needed to allocate a channel for exclusive usage by
> dma_request_channel()
>

The weird part was that when testing this, dma_request_channel()
would not return any channel before the DMA_PRIVATE flag was set. I
think I did this some time ago; I can retest with the current 2.6.29
and see how it behaves.

> - it would set the DMA_PRIVATE flag if a private channel candidate has
> been found.
>

Which I think was the reason I added the private flag.

--
Best regards,
Hans-Christian Egtvedt

2009-04-01 05:11:17

by Hans-Christian Egtvedt

Subject: Re: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

On Tue, 31 Mar 2009 15:03:07 +0100
"Sosnowski, Maciej" <[email protected]> wrote:

> Hans-Christian Egtvedt wrote:
> > This patch adds a cyclic DMA interface to the DW DMA driver. This is
> > very useful if you want to use the DMA controller in combination
> > with a sound device which uses cyclic buffers.
> >
> > Using a DMA channel for cyclic DMA prevents it from being used as a
> > normal DMA engine until the user calls the cyclic free function on
> > the DMA channel. Also, a cyclic DMA list cannot be prepared if the
> > channel is already active.
> >
> > Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> > ---
> > drivers/dma/dw_dmac.c | 303 +++++++++++++++++++++++++++++++++++++++++++-
> > drivers/dma/dw_dmac_regs.h | 7 +-
> > include/linux/dw_dmac.h | 19 +++
> > 3 files changed, 327 insertions(+), 2 deletions(-)
> >
>
> The patch looks good to me.
>

Thanks

> Just a few remarks below:
>
> > + for (i = 0; i < dwc->cdesc->periods; i++)
> > + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
> > +
> > + return;
> > + }
> > +}
>
> This return is not needed
>

Agreed, will fixup.

> > +void dw_dma_cyclic_stop(struct dma_chan *chan)
> > +{
> > + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> > + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> > +
> > + channel_clear_bit(dw, CH_EN, dwc->mask);
> > + while (dma_readl(dw, CH_EN) & dwc->mask)
> > + cpu_relax();
> > +}
>
> Don't you need locks in dw_dma_cyclic_stop?
>

Hmmm, I'll look into this.

> > + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> > + if (was_cyclic) {
> > + spin_unlock_bh(&dwc->lock);
> > + dev_dbg(chan2dev(&dwc->chan),
> > + "channel already prepared for cyclic DMA\n");
> > + return ERR_PTR(-EBUSY);
> > + }
> > + spin_unlock_bh(&dwc->lock);
>
> In this case both spin_unlock_bh() calls could be replaced by a single
> one called before "if (was_cyclic)":
>

Ah yes, will fixup.

> + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
> + spin_unlock_bh(&dwc->lock);
> + if (was_cyclic) {
> + dev_dbg(chan2dev(&dwc->chan),
> + "channel already prepared for cyclic DMA\n");
> + return ERR_PTR(-EBUSY);
> + }
>

--
Best regards,
Hans-Christian Egtvedt

2009-04-01 05:30:35

by Dan Williams

Subject: Re: [PATCH 1/2] dw_dmac: set CAP_PRIVATE capability for DW DMA controller

Hans-Christian Egtvedt wrote:
> The weird part was that when testing this, dma_request_channel()
> would not return any channel before the DMA_PRIVATE flag was set. I
> think I did this some time ago; I can retest with the current 2.6.29
> and see how it behaves.
>

If CONFIG_NET_DMA=y then available channels will be consumed for
'public' usage. I would turn that option off for AVR32 as it hurts
performance on dma-incoherent archs like ARM. Async_tx will also
consume public channels if loaded. 2.6.30 will have the
CONFIG_ASYNC_TX_DMA option to turn off this allocation... or just set
DMA_PRIVATE and not worry about these :-).

--
Dan

2009-04-01 08:29:36

by Haavard Skinnemoen

Subject: Re: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

Sosnowski, Maciej wrote:
> > +void dw_dma_cyclic_stop(struct dma_chan *chan)
> > +{
> > + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> > + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> > +
> > + channel_clear_bit(dw, CH_EN, dwc->mask);
> > + while (dma_readl(dw, CH_EN) & dwc->mask)
> > + cpu_relax();
> > +}
>
> Don't you need locks in dw_dma_cyclic_stop?

Good question. On one hand, if cyclic_start() can race with
cyclic_stop(), the client probably has more serious issues to deal
with. On the other hand, if something ever manages to set the CH_EN bit
at the wrong moment, the loop may never finish.

So it's probably safest to wrap it in spin_lock_bh(). You should
probably also add a note that this function cannot be called from
interrupt context.

Haavard

2009-04-01 08:33:45

by Hans-Christian Egtvedt

Subject: Re: [PATCH 1/2] dw_dmac: set CAP_PRIVATE capability for DW DMA controller

On Wed, 1 Apr 2009 07:04:40 +0200
Hans-Christian Egtvedt <[email protected]> wrote:

> On Tue, 31 Mar 2009 15:01:49 +0100
> "Sosnowski, Maciej" <[email protected]> wrote:

<snipp>

> > Remember however that you can leave the device generally non-private
> > and attempt as needed to allocate a channel for exclusive usage by
> > dma_request_channel()
> >
>
> The weird part was that when testing this, dma_request_channel()
> would not return any channel before the DMA_PRIVATE flag was set. I
> think I did this some time ago; I can retest with the current 2.6.29
> and see how it behaves.
>

On 2.6.29 I no longer have to set the DMA_PRIVATE flag for
dma_request_channel() to work, so I guess this patch can be
skipped.

I am quite sure I did the previous testing without any of the DMA
clients enabled. My original work was based on 2.6.29-rc3 IIRC. So I do
not really know why this was needed.

<snipp>

--
Best regards,
Hans-Christian Egtvedt

2009-04-01 09:11:35

by Hans-Christian Egtvedt

Subject: Re: [PATCH 2/2] dw_dmac: add cyclic API to DW DMA driver

On Wed, 1 Apr 2009 10:28:30 +0200
Haavard Skinnemoen <[email protected]> wrote:

> Sosnowski, Maciej wrote:
> > > +void dw_dma_cyclic_stop(struct dma_chan *chan)
> > > +{
> > > + struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
> > > + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> > > +
> > > + channel_clear_bit(dw, CH_EN, dwc->mask);
> > > + while (dma_readl(dw, CH_EN) & dwc->mask)
> > > + cpu_relax();
> > > +}
> >
> > Don't you need locks in dw_dma_cyclic_stop?
>
> Good question. On one hand, if cyclic_start() can race with
> cyclic_stop(), the client probably has more serious issues to deal
> with. On the other hand, if something ever manages to set the CH_EN
> bit at the wrong moment, the loop may never finish.
>
> So it's probably safest to wrap it in spin_lock_bh(). You should
> probably add a note that this function can not be called from
> interrupt context too.
>

The problem then is when calling cyclic_stop from the trigger function
in an ALSA driver.

http://www.kernel.org/pub/linux/kernel/people/tiwai/docs/writing-an-alsa-driver/ch05s06.html#pcm-interface-operators-trigger-callback

This callback is atomic and runs with interrupts disabled, so using
spin_lock_bh() is AFAICT not allowed.
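
Roughly, the callback in question looks like this (driver-specific
names are hypothetical):

struct my_pcm_runtime {
	struct dma_chan *chan;
};

/* ALSA trigger callback: runs in atomic context with IRQs off */
static int my_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct my_pcm_runtime *prtd = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		return dw_dma_cyclic_start(prtd->chan);
	case SNDRV_PCM_TRIGGER_STOP:
		dw_dma_cyclic_stop(prtd->chan);
		return 0;
	default:
		return -EINVAL;
	}
}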

I am going to brush up the interface and document that the _stop and
_start functions have to be called with interrupts disabled. Will post a
v2 shortly.

--
Best regards,
Hans-Christian Egtvedt

2009-04-01 13:28:39

by Hans-Christian Egtvedt

Subject: [PATCH v2 1/1] dw_dmac: add cyclic API to DW DMA driver

This patch adds a cyclic DMA interface to the DW DMA driver. This is
very useful if you want to use the DMA controller in combination with a
sound device which uses cyclic buffers.

Using a DMA channel for cyclic DMA prevents it from being used as a
normal DMA engine until the user calls the cyclic free function on the
DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
is already active.

Signed-off-by: Hans-Christian Egtvedt <[email protected]>
---
drivers/dma/dw_dmac.c | 336 +++++++++++++++++++++++++++++++++++++++++++-
drivers/dma/dw_dmac_regs.h | 7 +-
include/linux/dw_dmac.h | 19 +++
3 files changed, 360 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a97c07e..0b89708 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc);
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ u32 status_block, u32 status_err, u32 status_xfer)
+{
+ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+ if (callback) {
+ spin_unlock(&dwc->lock);
+ callback(callback_param);
+ spin_lock(&dwc->lock);
+ }
+ }
+
+ /*
+ * Error and transfer complete are highly unlikely, and will most
+ * likely be due to a configuration error by the user.
+ */
+ if (unlikely(status_err & dwc->mask) ||
+ unlikely(status_xfer & dwc->mask)) {
+ int i;
+
+ dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+ "interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ /* make sure DMA does not restart by loading a new list */
+ channel_writel(dwc, LLP, 0);
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ for (i = 0; i < dwc->cdesc->periods; i++)
+ dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
spin_lock(&dwc->lock);
- if (status_err & (1 << i))
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
+ else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
@@ -884,6 +963,261 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ *
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+ dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+ return -ENODEV;
+ }
+
+ spin_lock(&dwc->lock);
+
+ /* assert channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(chan2dev(&dwc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+ spin_unlock(&dwc->lock);
+ return -EBUSY;
+ }
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ /* setup DMAC channel registers */
+ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, CTL_HI, 0);
+
+ channel_set_bit(dw, CH_EN, dwc->mask);
+
+ spin_unlock(&dwc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ *
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ spin_lock(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ *
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_cyclic_desc *cdesc;
+ struct dw_cyclic_desc *retval = NULL;
+ struct dw_desc *desc;
+ struct dw_desc *last = NULL;
+ struct dw_dma_slave *dws = chan->private;
+ unsigned long was_cyclic;
+ unsigned int reg_width;
+ unsigned int periods;
+ unsigned int i;
+
+ spin_lock_bh(&dwc->lock);
+ if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+ spin_unlock_bh(&dwc->lock);
+ dev_dbg(chan2dev(&dwc->chan),
+ "queue and/or active list are not empty\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ spin_unlock_bh(&dwc->lock);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ retval = ERR_PTR(-EINVAL);
+ reg_width = dws->reg_width;
+ periods = buf_len / period_len;
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer. */
+ if (period_len > (DWC_MAX_COUNT << reg_width))
+ goto out_err;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto out_err;
+
+ retval = ERR_PTR(-ENOMEM);
+
+ if (periods > NR_DESCS_PER_CHANNEL)
+ goto out_err;
+
+ cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+ if (!cdesc)
+ goto out_err;
+
+ cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+ if (!cdesc->desc)
+ goto out_err_alloc;
+
+ for (i = 0; i < periods; i++) {
+ desc = dwc_desc_get(dwc);
+ if (!desc)
+ goto out_err_desc_get;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.dar = dws->tx_reg;
+ desc->lli.sar = buf_addr + (period_len * i);
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2P
+ | DWC_CTLL_INT_EN);
+ break;
+ case DMA_FROM_DEVICE:
+ desc->lli.dar = buf_addr + (period_len * i);
+ desc->lli.sar = dws->rx_reg;
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_FC_P2M
+ | DWC_CTLL_INT_EN);
+ break;
+ default:
+ break;
+ }
+
+ desc->lli.ctlhi = (period_len >> reg_width);
+ cdesc->desc[i] = desc;
+
+ if (last) {
+ last->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan),
+ last->txd.phys, sizeof(last->lli),
+ DMA_TO_DEVICE);
+ }
+
+ last = desc;
+ }
+
+ /* let's make a cyclic list */
+ last->lli.llp = cdesc->desc[0]->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+ sizeof(last->lli), DMA_TO_DEVICE);
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+ "period %zu periods %d\n", buf_addr, buf_len,
+ period_len, periods);
+
+ cdesc->periods = periods;
+ dwc->cdesc = cdesc;
+
+ return cdesc;
+
+out_err_desc_get:
+ while (i--)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+ kfree(cdesc);
+out_err:
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ *
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_cyclic_desc *cdesc = dwc->cdesc;
+ int i;
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+ if (!cdesc)
+ return;
+
+ spin_lock_bh(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ for (i = 0; i < cdesc->periods; i++)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+
+ kfree(cdesc->desc);
+ kfree(cdesc);
+
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b20..13a5807 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {

#define DW_REGLEN 0x400

+enum dw_dmac_flags {
+ DW_DMA_IS_CYCLIC = 0,
+};
+
struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
spinlock_t lock;

/* these other elements are all protected by lock */
+ unsigned long flags;
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ struct dw_cyclic_desc *cdesc;

unsigned int descs_allocated;
};
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
return container_of(chan, struct dw_dma_chan, chan);
}

-
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde..c8aad71 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */

+/* DMA API extensions */
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
#endif /* DW_DMAC_H */
--
1.5.6.3

2009-04-01 13:40:24

by Haavard Skinnemoen

Subject: Re: [PATCH v2 1/1] dw_dmac: add cyclic API to DW DMA driver

Hans-Christian Egtvedt wrote:
> +/**
> + * dw_dma_cyclic_start - start the cyclic DMA transfer
> + *
> + * @chan: the DMA channel to start

Documentation/kernel-doc-nano-HOWTO.txt says:

The @argument descriptions must begin on the very next line following
this opening short function description line, with no intervening
empty comment lines.

Looks good other than that.

Haavard

2009-04-01 13:53:25

by Hans-Christian Egtvedt

Subject: [PATCH v3 1/1] dw_dmac: add cyclic API to DW DMA driver

This patch adds a cyclic DMA interface to the DW DMA driver. This is
very useful if you want to use the DMA controller in combination with a
sound device which uses cyclic buffers.

Using a DMA channel for cyclic DMA prevents it from being used as a
normal DMA engine until the user calls the cyclic free function on the
DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
is already active.

Signed-off-by: Hans-Christian Egtvedt <[email protected]>
---
drivers/dma/dw_dmac.c | 332 +++++++++++++++++++++++++++++++++++++++++++-
drivers/dma/dw_dmac_regs.h | 7 +-
include/linux/dw_dmac.h | 19 +++
3 files changed, 356 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a97c07e..b22ca0d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc);
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ u32 status_block, u32 status_err, u32 status_xfer)
+{
+ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+ if (callback) {
+ spin_unlock(&dwc->lock);
+ callback(callback_param);
+ spin_lock(&dwc->lock);
+ }
+ }
+
+ /*
+ * Error and transfer complete are highly unlikely, and will most
+ * likely be due to a configuration error by the user.
+ */
+ if (unlikely(status_err & dwc->mask) ||
+ unlikely(status_xfer & dwc->mask)) {
+ int i;
+
+ dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+ "interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ /* make sure DMA does not restart by loading a new list */
+ channel_writel(dwc, LLP, 0);
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ for (i = 0; i < dwc->cdesc->periods; i++)
+ dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
spin_lock(&dwc->lock);
- if (status_err & (1 << i))
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
+ else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
@@ -884,6 +963,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+ dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+ return -ENODEV;
+ }
+
+ spin_lock(&dwc->lock);
+
+ /* assert channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(chan2dev(&dwc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+ spin_unlock(&dwc->lock);
+ return -EBUSY;
+ }
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ /* setup DMAC channel registers */
+ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, CTL_HI, 0);
+
+ channel_set_bit(dw, CH_EN, dwc->mask);
+
+ spin_unlock(&dwc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ spin_lock(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc on success, or an ERR_PTR(-errno) on failure.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_cyclic_desc *cdesc;
+ struct dw_cyclic_desc *retval = NULL;
+ struct dw_desc *desc;
+ struct dw_desc *last = NULL;
+ struct dw_dma_slave *dws = chan->private;
+ unsigned long was_cyclic;
+ unsigned int reg_width;
+ unsigned int periods;
+ unsigned int i;
+
+ spin_lock_bh(&dwc->lock);
+ if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+ spin_unlock_bh(&dwc->lock);
+ dev_dbg(chan2dev(&dwc->chan),
+ "queue and/or active list are not empty\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ spin_unlock_bh(&dwc->lock);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ retval = ERR_PTR(-EINVAL);
+ reg_width = dws->reg_width;
+ periods = buf_len / period_len;
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer. */
+ if (period_len > (DWC_MAX_COUNT << reg_width))
+ goto out_err;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto out_err;
+
+ retval = ERR_PTR(-ENOMEM);
+
+ if (periods > NR_DESCS_PER_CHANNEL)
+ goto out_err;
+
+ cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+ if (!cdesc)
+ goto out_err;
+
+ cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+ if (!cdesc->desc)
+ goto out_err_alloc;
+
+ for (i = 0; i < periods; i++) {
+ desc = dwc_desc_get(dwc);
+ if (!desc)
+ goto out_err_desc_get;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.dar = dws->tx_reg;
+ desc->lli.sar = buf_addr + (period_len * i);
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2P
+ | DWC_CTLL_INT_EN);
+ break;
+ case DMA_FROM_DEVICE:
+ desc->lli.dar = buf_addr + (period_len * i);
+ desc->lli.sar = dws->rx_reg;
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_FC_P2M
+ | DWC_CTLL_INT_EN);
+ break;
+ default:
+ break;
+ }
+
+ desc->lli.ctlhi = (period_len >> reg_width);
+ cdesc->desc[i] = desc;
+
+ if (last) {
+ last->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan),
+ last->txd.phys, sizeof(last->lli),
+ DMA_TO_DEVICE);
+ }
+
+ last = desc;
+ }
+
+ /* let's make a cyclic list: link the last descriptor back to the first */
+ last->lli.llp = cdesc->desc[0]->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+ sizeof(last->lli), DMA_TO_DEVICE);
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+ "period %zu periods %d\n", buf_addr, buf_len,
+ period_len, periods);
+
+ cdesc->periods = periods;
+ dwc->cdesc = cdesc;
+
+ return cdesc;
+
+out_err_desc_get:
+ while (i--)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+ kfree(cdesc);
+out_err:
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_cyclic_desc *cdesc = dwc->cdesc;
+ int i;
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+ if (!cdesc)
+ return;
+
+ spin_lock_bh(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ for (i = 0; i < cdesc->periods; i++)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+
+ kfree(cdesc->desc);
+ kfree(cdesc);
+
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b20..13a5807 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {

#define DW_REGLEN 0x400

+enum dw_dmac_flags {
+ DW_DMA_IS_CYCLIC = 0,
+};
+
struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
spinlock_t lock;

/* these other elements are all protected by lock */
+ unsigned long flags;
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ struct dw_cyclic_desc *cdesc;

unsigned int descs_allocated;
};
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
return container_of(chan, struct dw_dma_chan, chan);
}

-
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde..c8aad71 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */

+/* DMA API extensions */
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
#endif /* DW_DMAC_H */
--
1.5.6.3
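
For reference, a minimal sketch of how a peripheral driver might drive the
cyclic API added by this patch. The my_* names, the buffer geometry, and the
assumption that the channel was requested with a struct dw_dma_slave in
chan->private are illustrative only; the dw_dma_cyclic_*() calls and the
struct dw_cyclic_desc callback fields are the ones introduced above.

/*
 * Sketch only: struct my_dev, the my_*() helpers and the buffer
 * geometry are hypothetical.  The channel is assumed to have been
 * requested with a struct dw_dma_slave in chan->private, as for the
 * existing slave API, since dw_dma_cyclic_prep() reads the peripheral
 * register address and width from there.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/dw_dmac.h>

#define MY_PERIODS	4
#define MY_PERIOD_LEN	1024	/* bytes; must be a multiple of 1 << reg_width */

struct my_dev {
	struct dma_chan	*chan;	/* from dma_request_channel() (DMA_PRIVATE) */
	dma_addr_t	buf;	/* DMA address of a coherent ring buffer */
};

/* Runs from the driver tasklet once per completed period. */
static void my_period_done(void *param)
{
	struct my_dev *md = param;

	/* advance the ring buffer read pointer, wake up writers, ... */
}

static int my_start_cyclic(struct my_dev *md)
{
	struct dw_cyclic_desc *cdesc;
	int ret;

	/* process context: prep takes the channel lock with _bh */
	cdesc = dw_dma_cyclic_prep(md->chan, md->buf,
				   MY_PERIODS * MY_PERIOD_LEN,
				   MY_PERIOD_LEN, DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = my_period_done;
	cdesc->period_callback_param = md;

	/* start/stop expect softirqs to be disabled already */
	local_bh_disable();
	ret = dw_dma_cyclic_start(md->chan);
	local_bh_enable();
	if (ret)
		dw_dma_cyclic_free(md->chan);
	return ret;
}

static void my_stop_cyclic(struct my_dev *md)
{
	local_bh_disable();
	dw_dma_cyclic_stop(md->chan);
	local_bh_enable();
	/* frees the descriptors and makes the channel non-cyclic again */
	dw_dma_cyclic_free(md->chan);
}

/* Bytes the controller has consumed in the current pass (TX case). */
static size_t my_bytes_done(struct my_dev *md)
{
	return dw_dma_get_src_addr(md->chan) - md->buf;
}

Note the locking asymmetry in the patch: dw_dma_cyclic_prep() and
dw_dma_cyclic_free() take the channel lock with spin_lock_bh() and so must be
called with softirqs enabled, while dw_dma_cyclic_start() and
dw_dma_cyclic_stop() expect the caller to have softirqs disabled, hence the
local_bh_disable()/local_bh_enable() pairs in the sketch.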

2009-04-01 13:53:50

by Haavard Skinnemoen

[permalink] [raw]
Subject: Re: [PATCH v3 1/1] dw_dmac: add cyclic API to DW DMA driver

Hans-Christian Egtvedt wrote:
> This patch adds a cyclic DMA interface to the DW DMA driver. This is
> very useful if you want to use the DMA controller in combination with a
> sound device which uses cyclic buffers.
>
> Using a DMA channel for cyclic DMA will disable the possibility to use
> it as a normal DMA engine until the user calls the cyclic free function
> on the DMA channel. Also a cyclic DMA list can not be prepared if the
> channel is already active.
>
> Signed-off-by: Hans-Christian Egtvedt <[email protected]>

Acked-by: Haavard Skinnemoen <[email protected]>

2009-04-01 15:08:30

by Sosnowski, Maciej

[permalink] [raw]
Subject: RE: [PATCH v3 1/1] dw_dmac: add cyclic API to DW DMA driver

Hans-Christian Egtvedt wrote:
> This patch adds a cyclic DMA interface to the DW DMA driver. This is
> very useful if you want to use the DMA controller in combination with a
> sound device which uses cyclic buffers.
>
> Using a DMA channel for cyclic DMA will disable the possibility to use
> it as a normal DMA engine until the user calls the cyclic free function
> on the DMA channel. Also a cyclic DMA list can not be prepared if the
> channel is already active.
>
> Signed-off-by: Hans-Christian Egtvedt <[email protected]>
> ---
> drivers/dma/dw_dmac.c | 332 +++++++++++++++++++++++++++++++++++++++++++-
> drivers/dma/dw_dmac_regs.h | 7 +-
> include/linux/dw_dmac.h | 19 +++
> 3 files changed, 356 insertions(+), 2 deletions(-)
>

Acked-by: Maciej Sosnowski <[email protected]>

2009-04-01 22:46:51

by Dan Williams

[permalink] [raw]
Subject: Re: [PATCH v3 1/1] dw_dmac: add cyclic API to DW DMA driver

On Wed, Apr 1, 2009 at 6:52 AM, Haavard Skinnemoen
<[email protected]> wrote:
> Hans-Christian Egtvedt wrote:
>> This patch adds a cyclic DMA interface to the DW DMA driver. This is
>> very useful if you want to use the DMA controller in combination with a
>> sound device which uses cyclic buffers.
>>
>> Using a DMA channel for cyclic DMA will disable the possibility to use
>> it as a normal DMA engine until the user calls the cyclic free function
>> on the DMA channel. Also a cyclic DMA list can not be prepared if the
>> channel is already active.
>>
>> Signed-off-by: Hans-Christian Egtvedt <[email protected]>
>
> Acked-by: Haavard Skinnemoen <[email protected]>

Thanks, applied.