This patchset fixes a few issues in the dw_dmac driver and extends its functionality.
Changes in V2:
- lflags added in dw_dma_chan instead of dw_dma
- Patch from Linus Walleij added for pause and resume functionality.
Linus Walleij (1):
dmaengine/dw_dmac: implement pause and resume in dwc_control
Viresh Kumar (6):
dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants
dmaengine/dw_dmac: Enable resubmission from callback routine.
dmaengine/dw_dmac: call dwc_descriptor_complete from dwc_control with
lock held
dmaengine/dw_dmac: don't call callback routine in case
dmaengine_terminate_all() is called
dmaengine/dw_dmac: set residue as total len in dwc_tx_status if
status is !DMA_SUCCESS
dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater
than DWC_MAX_COUNT
drivers/dma/dw_dmac.c | 205 +++++++++++++++++++++++++++-----------------
drivers/dma/dw_dmac_regs.h | 2 +
2 files changed, 129 insertions(+), 78 deletions(-)
--
1.7.2.2
dmaengine routines can be called from interrupt context and with interrupts
disabled, whereas spin_unlock_bh() can't be called from such contexts. So this
patch converts all spin_*_bh() routines to their irqsave variants.
The flags passed to the irqsave variants are kept in the dw_dma_chan structure,
so that dwc_descriptor_complete() can unlock a lock taken by its parent
routines. This is done in a later patch.
Signed-off-by: Viresh Kumar <[email protected]>
---
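Not part of the applied diff: a minimal sketch of the locking pattern this
patch converts to, with a hypothetical helper name. The saved interrupt state
lives in the channel structure (dwc->lflags) rather than on the stack, so that
a callee such as dwc_descriptor_complete() can later drop and retake the lock
on behalf of its caller:

	/* Hypothetical illustration of the irqsave pattern */
	static void example_touch_freelist(struct dw_dma_chan *dwc)
	{
		/* disables local interrupts and saves the previous state */
		spin_lock_irqsave(&dwc->lock, dwc->lflags);
		/* ... manipulate dwc->free_list under the lock ... */
		/* restores the interrupt state saved above */
		spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
	}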
drivers/dma/dw_dmac.c | 48 ++++++++++++++++++++++----------------------
drivers/dma/dw_dmac_regs.h | 1 +
2 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b15c32c..4f4df9f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -94,7 +94,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
struct dw_desc *ret = NULL;
unsigned int i = 0;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +104,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
@@ -135,7 +135,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc_sync_desc_for_cpu(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +143,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
}
}
@@ -545,7 +545,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
dma_cookie_t cookie;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
cookie = dwc_assign_cookie(dwc, desc);
/*
@@ -565,7 +565,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
return cookie;
}
@@ -816,7 +816,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -827,7 +827,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_splice_init(&dwc->queue, &list);
list_splice_init(&dwc->active_list, &list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
@@ -851,9 +851,9 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -870,10 +870,10 @@ static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -922,16 +922,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
break;
}
@@ -943,7 +943,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
i = ++dwc->descs_allocated;
}
@@ -952,7 +952,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -975,7 +975,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
@@ -984,7 +984,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1096,16 +1096,16 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int periods;
unsigned int i;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1225,7 +1225,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
if (!cdesc)
return;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1235,7 +1235,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821..5915743 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
u8 priority;
spinlock_t lock;
+ unsigned long lflags;
/* these other elements are all protected by lock */
unsigned long flags;
--
1.7.2.2
dwc_descriptor_complete() must always be called with the channel lock held.
This patch moves the unlock code in dwc_control() until after
dwc_descriptor_complete() has been called.
Signed-off-by: Viresh Kumar <[email protected]>
---
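An illustration only (not part of the diff): after this change the completion
path in dwc_control() runs entirely under the channel lock, roughly:

	spin_lock_irqsave(&dwc->lock, dwc->lflags);
	/* ... splice active and queued descriptors onto a local list ... */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);	/* lock held here */
	spin_unlock_irqrestore(&dwc->lock, dwc->lflags);

dwc_descriptor_complete() itself drops and retakes the lock only around the
client callback, as introduced elsewhere in this series.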
drivers/dma/dw_dmac.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index dbe8203..bbb658c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -825,12 +825,12 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_splice_init(&dwc->queue, &list);
list_splice_init(&dwc->active_list, &list);
- spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
-
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+
return 0;
}
--
1.7.2.2
If the transfer status is not DMA_SUCCESS, return the total transfer length as
the residue, instead of zero.
Signed-off-by: Viresh Kumar <[email protected]>
---
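A hypothetical client-side sketch (not part of the patch) of how the residue
is observed through the standard dmaengine helpers:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_SUCCESS)
		/* with this patch: the total length of the first active
		 * descriptor, instead of 0 */
		pr_debug("residue: %u bytes\n", state.residue);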
drivers/dma/dw_dmac.c | 6 +++++-
1 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 8d5d048..211728c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -864,7 +864,11 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
--
1.7.2.2
From: Linus Walleij <[email protected]>
Some peripherals, like amba-pl011, need pause to be implemented in the DMA
controller driver. This patch also makes dwc_tx_status() return the correct
status when the channel is paused.
Signed-off-by: Linus Walleij <[email protected]>
Signed-off-by: Viresh Kumar <[email protected]>
---
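A hypothetical client-side sketch: a driver such as amba-pl011 reaches this
code through the generic dmaengine wrappers, roughly:

	/* suspend the channel; the FIFO is drained before this returns */
	dmaengine_pause(chan);		/* -> dwc_control(chan, DMA_PAUSE, 0) */
	/* ... inspect residue, decide whether to continue ... */
	dmaengine_resume(chan);		/* -> dwc_control(chan, DMA_RESUME, 0) */

While the channel is paused, dwc_tx_status() reports DMA_PAUSED.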
drivers/dma/dw_dmac.c | 59 +++++++++++++++++++++++++++++---------------
drivers/dma/dw_dmac_regs.h | 1 +
2 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 858505b..c654fe6 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -830,34 +830,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ u32 cfglo;
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_irqsave(&dwc->lock, dwc->lflags);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
- channel_clear_bit(dw, CH_EN, dwc->mask);
+ dwc->paused = true;
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+ } else if (cmd == DMA_RESUME) {
+ if (!dwc->paused)
+ return 0;
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
- /* active_list entries will end up before queued entries */
- list_splice_init(&dwc->queue, &list);
- list_splice_init(&dwc->active_list, &list);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dwc->paused = false;
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc, 0);
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
- spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dwc->paused = false;
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc, 0);
+
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+ } else
+ return -ENXIO;
return 0;
}
@@ -893,6 +909,9 @@ dwc_tx_status(struct dma_chan *chan,
else
dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (dwc->paused)
+ return DMA_PAUSED;
+
return ret;
}
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 5915743..47138b1 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -138,6 +138,7 @@ struct dw_dma_chan {
void __iomem *ch_regs;
u8 mask;
u8 priority;
+ bool paused;
spinlock_t lock;
unsigned long lflags;
--
1.7.2.2
If the len passed in an sg for slave_sg transfers is greater than
DWC_MAX_COUNT, the driver programs the controller incorrectly. This patch adds
code to handle this situation by allocating more than one desc for the same sg.
Signed-off-by: Viresh Kumar <[email protected]>
---
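An illustration only, assuming DWC_MAX_COUNT is 2048 as in this driver: a
10000-byte sg entry with mem_width = 2 (32-bit words) is 2500 words, so it is
split into one descriptor of 2048 words (8192 bytes) and one of 452 words
(1808 bytes). The per-direction loops below implement, in effect:

	while (len) {
		dlen = min_t(u32, len, DWC_MAX_COUNT << mem_width);
		/* program one desc for dlen bytes starting at mem */
		mem += dlen;
		len -= dlen;
	}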
drivers/dma/dw_dmac.c | 65 +++++++++++++++++++++++++++++++++----------------
1 files changed, 44 insertions(+), 21 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 211728c..858505b 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -697,9 +697,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -707,16 +713,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}
- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
if (!first) {
first = desc;
@@ -730,7 +739,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -743,15 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -759,10 +763,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
if (!first) {
first = desc;
@@ -776,7 +796,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
--
1.7.2.2
If dmaengine_terminate_all() is called for a DMA channel, it doesn't make much
sense to call the registered callback routine, whereas in case of success or
failure it must be called.
Signed-off-by: Viresh Kumar <[email protected]>
---
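A hypothetical client-side sketch: after this change, tearing down a channel
no longer invokes the client's completion callbacks:

	/* -> dwc_control(chan, DMA_TERMINATE_ALL, 0); all descriptors are
	 * flushed with callback_required == 0, so no callback runs */
	dmaengine_terminate_all(chan);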
drivers/dma/dw_dmac.c | 27 ++++++++++++++++-----------
1 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index bbb658c..8d5d048 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -195,7 +195,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/
static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
dma_async_tx_callback callback = NULL;
void *param = NULL;
@@ -205,8 +206,10 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
dwc_sync_desc_for_cpu(dwc, desc);
@@ -238,10 +241,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
- if (callback)
- callback(param);
- spin_lock_irqsave(&dwc->lock, dwc->lflags);
+ if (callback_required) {
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
+ if (callback)
+ callback(param);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
+ }
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -270,7 +275,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 1);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -320,7 +325,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 1);
}
dev_err(chan2dev(&dwc->chan),
@@ -382,7 +387,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dump_lli(dwc, &child->lli);
/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, 1);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -827,7 +832,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 0);
spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
--
1.7.2.2
Resubmission of a new transfer must be allowed from callback routines. For
this, release the lock before calling the callback routine and take it again
afterwards.
Signed-off-by: Viresh Kumar <[email protected]>
---
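A hypothetical client sketch (names invented for illustration): with the lock
dropped around the callback, a client may now submit its next transfer
directly from the completion handler:

	static void xfer_done(void *param)
	{
		struct my_client *mc = param;	/* hypothetical client state */
		struct dma_async_tx_descriptor *txd;

		/* safe after this patch: dwc->lock is not held here */
		txd = mc->chan->device->device_prep_slave_sg(mc->chan,
				mc->sgl, mc->sg_len, DMA_TO_DEVICE,
				DMA_PREP_INTERRUPT);
		if (txd) {
			txd->callback = xfer_done;
			txd->callback_param = mc;
			txd->tx_submit(txd);
		}
	}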
drivers/dma/dw_dmac.c | 10 ++++------
1 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4f4df9f..dbe8203 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -197,8 +197,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
@@ -238,12 +238,10 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
+ spin_unlock_irqrestore(&dwc->lock, dwc->lflags);
if (callback)
callback(param);
+ spin_lock_irqsave(&dwc->lock, dwc->lflags);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
--
1.7.2.2
On Tue, 2011-04-26 at 11:08 +0530, Viresh Kumar wrote:
> This patchset fixes a few issues in the dw_dmac driver and extends its functionality.
>
> Changes in V2:
> - lflags added in dw_dma_chan instead of dw_dma
> - Patch from Linus Walleij added for pause and resume functionality.
>
> Linus Walleij (1):
> dmaengine/dw_dmac: implement pause and resume in dwc_control
>
> Viresh Kumar (6):
> dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants
> dmaengine/dw_dmac: Enable resubmission from callback routine.
> dmaengine/dw_dmac: call dwc_descriptor_complete from dwc_control with
> lock held
> dmaengine/dw_dmac: don't call callback routine in case
> dmaengine_terminate_all() is called
> dmaengine/dw_dmac: set residue as total len in dwc_tx_status if
> status is !DMA_SUCCESS
> dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater
> than DWC_MAX_COUNT
>
> drivers/dma/dw_dmac.c | 205 +++++++++++++++++++++++++++-----------------
> drivers/dma/dw_dmac_regs.h | 2 +
> 2 files changed, 129 insertions(+), 78 deletions(-)
>
Applied, thanks.
--
~Vinod