2011-04-18 10:51:16

by Viresh Kumar

Subject: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

dmaengine routines can be called from interrupt context and with interrupts
disabled, whereas spin_unlock_bh can't be called from such contexts. This patch
therefore converts all spin_*_bh calls to their irqsave variants.

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 56 +++++++++++++++++++++++++------------------
drivers/dma/dw_dmac_regs.h | 1 +
2 files changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 1bd4803..357265f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -90,11 +90,12 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_desc *desc, *_desc;
struct dw_desc *ret = NULL;
unsigned int i = 0;

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +105,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

@@ -130,12 +131,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
*/
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
if (desc) {
struct dw_desc *child;

dwc_sync_desc_for_cpu(dwc, desc);

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +146,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
}
}

@@ -199,6 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
dma_async_tx_callback callback;
void *param;
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;

@@ -543,9 +547,10 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct dw_desc *desc = txd_to_dw_desc(tx);
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
dma_cookie_t cookie;

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
cookie = dwc_assign_cookie(dwc, desc);

/*
@@ -565,7 +570,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}

- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

return cookie;
}
@@ -816,7 +821,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);

channel_clear_bit(dw, CH_EN, dwc->mask);

@@ -827,7 +832,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_splice_init(&dwc->queue, &list);
list_splice_init(&dwc->active_list, &list);

- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
@@ -842,6 +847,7 @@ dwc_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
dma_cookie_t last_used;
dma_cookie_t last_complete;
int ret;
@@ -851,9 +857,9 @@ dwc_tx_status(struct dma_chan *chan,

ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

last_complete = dwc->completed;
last_used = chan->cookie;
@@ -869,11 +875,12 @@ dwc_tx_status(struct dma_chan *chan,
static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -922,16 +929,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
break;
}

@@ -943,7 +950,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
i = ++dwc->descs_allocated;
}

@@ -952,7 +959,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
/* channel_set_bit(dw, MASK.BLOCK, dwc->mask); */
channel_set_bit(dw, MASK.ERROR, dwc->mask);

- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -975,7 +982,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;

@@ -984,7 +991,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* channel_clear_bit(dw, MASK.BLOCK, dwc->mask); */
channel_clear_bit(dw, MASK.ERROR, dwc->mask);

- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1086,6 +1093,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
enum dma_data_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc;
struct dw_cyclic_desc *retval = NULL;
struct dw_desc *desc;
@@ -1096,16 +1104,16 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int periods;
unsigned int i;

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}

was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1225,7 +1233,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
if (!cdesc)
return;

- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, dw->flags);

channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1235,7 +1243,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);

- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);

for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821..c89fd83 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -173,6 +173,7 @@ struct dw_dma {
void __iomem *regs;
struct tasklet_struct tasklet;
struct clk *clk;
+ unsigned long flags; /* for spin_lock_irqsave */

u8 all_chan_mask;

--
1.7.2.2


2011-04-18 10:50:53

by Viresh Kumar

Subject: [PATCH 5/6] dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS

If the transfer status is != DMA_SUCCESS, return the total transfer length as
the residue instead of zero.

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 6 +++++-
1 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9d8ed35..92f4cb0 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -870,7 +870,11 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}

- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);

return ret;
}
--
1.7.2.2

2011-04-18 10:51:11

by Viresh Kumar

Subject: [PATCH 3/6] dmaengine/dw_dmac: call dwc_descriptor_complete from dwc_control with lock held

dwc_descriptor_complete must always be called with the channel lock held. This
patch moves the unlock in dwc_control() until after dwc_descriptor_complete has
been called.

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 69a9c9d..b49d7f3 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -830,12 +830,12 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_splice_init(&dwc->queue, &list);
list_splice_init(&dwc->active_list, &list);

- spin_unlock_irqrestore(&dwc->lock, dw->flags);
-
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);

+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
+
return 0;
}

--
1.7.2.2

2011-04-18 10:51:33

by Viresh Kumar

Subject: [PATCH 4/6] dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called

If dmaengine_terminate_all() is called for a DMA channel, it doesn't make much
sense to call the registered callback routine, whereas on success or failure it
must be called.

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 31 ++++++++++++++++++-------------
1 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b49d7f3..9d8ed35 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -198,10 +198,11 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/

static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
@@ -209,8 +210,10 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }

dwc_sync_desc_for_cpu(dwc, desc);

@@ -242,10 +245,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}

- spin_unlock_irqrestore(&dwc->lock, dw->flags);
- if (callback)
- callback(param);
- spin_lock_irqsave(&dwc->lock, dw->flags);
+ if (callback_required) {
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
+ if (callback)
+ callback(param);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
+ }
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -274,7 +279,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
}

list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 1);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -324,7 +329,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 1);
}

dev_dbg(chan2dev(&dwc->chan),
@@ -386,7 +391,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dump_lli(dwc, &child->lli);

/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, 1);
}

/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -832,7 +837,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, 0);

spin_unlock_irqrestore(&dwc->lock, dw->flags);

--
1.7.2.2

2011-04-18 10:51:24

by Viresh Kumar

Subject: [PATCH 6/6] dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT

If the len passed in an sg for slave_sg transfers is greater than DWC_MAX_COUNT,
the driver programs the controller incorrectly. This patch handles this
situation by allocating more than one desc for the same sg.
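
As a worked example (assuming, say, a DWC_MAX_COUNT of 2048 and a mem_width of
2, i.e. 32-bit transfers): a 20480-byte sg entry is now covered by three
descriptors of 8192, 8192 and 4096 bytes (2048, 2048 and 1024 transfers),
instead of a single block whose transfer count would not fit the controller's
block-size field.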

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 65 +++++++++++++++++++++++++++++++++----------------
1 files changed, 44 insertions(+), 21 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 92f4cb0..2b38576 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -702,9 +702,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;

+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -712,16 +718,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}

- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;

if (!first) {
first = desc;
@@ -735,7 +744,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -748,15 +760,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;

mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -764,10 +768,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;

+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;

if (!first) {
first = desc;
@@ -781,7 +801,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
--
1.7.2.2

2011-04-18 10:51:42

by Viresh Kumar

Subject: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

Resubmission of a new transfer must be allowed from callbacks. To allow this,
release the lock before calling the callback routine and take it again afterwards.

Signed-off-by: Viresh Kumar <[email protected]>
---
drivers/dma/dw_dmac.c | 6 ++----
1 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 357265f..69a9c9d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -242,12 +242,10 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}

- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
+ spin_unlock_irqrestore(&dwc->lock, dw->flags);
if (callback)
callback(param);
+ spin_lock_irqsave(&dwc->lock, dw->flags);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
--
1.7.2.2

2011-04-19 06:13:19

by Vinod Koul

Subject: RE: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

On 04/18/2011 04:20 PM, viresh kumar wrote:
>
> Resubmission of a new transfer must be allowed from callbacks. To allow this,
> release the lock before calling the callback routine and take it again afterwards.
Why would you like to do that?

IMO it's not a good thing, as you are essentially doing it in your tasklet...
Make sure the submit() is done by the client before this; use it to start the
next transaction in your tasklet, and let the client know in the callback
that the txn is finished.

~Vinod

2011-04-19 06:23:55

by Viresh Kumar

Subject: Re: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

On 04/19/2011 11:42 AM, Koul, Vinod wrote:
> On 04/18/2011 04:20 PM, viresh kumar wrote:
>>
>> Resubmission of a new transfer must be allowed from callbacks. To allow this,
>> release the lock before calling the callback routine and take it again afterwards.
> Why would you like to do that?
>
> IMO it's not a good thing, as you are essentially doing it in your tasklet...
> Make sure the submit() is done by the client before this; use it to start the
> next transaction in your tasklet, and let the client know in the callback
> that the txn is finished.
>

Vinod,

I initiated this discussion a few days back on a separate thread:

http://www.spinics.net/lists/arm-kernel/msg121139.html

And so I implemented this.

--
viresh

2011-04-19 06:26:08

by Viresh Kumar

Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On 04/18/2011 04:19 PM, Viresh KUMAR wrote:
> dmaengine routines can be called from interrupt context and with interrupts
> disabled, whereas spin_unlock_bh can't be called from such contexts. This patch
> therefore converts all spin_*_bh calls to their irqsave variants.
>
> Signed-off-by: Viresh Kumar <[email protected]>
> ---
> drivers/dma/dw_dmac.c | 56 +++++++++++++++++++++++++------------------
> drivers/dma/dw_dmac_regs.h | 1 +
> 2 files changed, 33 insertions(+), 24 deletions(-)
>
>
> diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
> index 720f821..c89fd83 100644
> --- a/drivers/dma/dw_dmac_regs.h
> +++ b/drivers/dma/dw_dmac_regs.h
> @@ -173,6 +173,7 @@ struct dw_dma {
> void __iomem *regs;
> struct tasklet_struct tasklet;
> struct clk *clk;
> + unsigned long flags; /* for spin_lock_irqsave */

Oops!!!
This must have been added in dw_dma_chan instead :(
Will resend it.

--
viresh

2011-04-19 06:36:09

by Vinod Koul

Subject: RE: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

On 04/19/2011 11:53 AM, viresh kumar wrote:
> On 04/19/2011 11:42 AM, Koul, Vinod wrote:
> > On 04/18/2011 04:20 PM, viresh kumar wrote:
> Vinod,
>
> I initiated this discussion a few days back on a separate thread:
>
> http://www.spinics.net/lists/arm-kernel/msg121139.html
>
> And so I implemented this.
Ahhh, on the road, catching up on email...

This was a dmaengine requirement, not an absolute one for slave-dma.
But it would be a good idea to resist this. What is your reason for it?

I am adding documentation for slave DMA; hopefully I will complete it after my
vacation. It should help clarify these things.

~Vinod

2011-04-19 06:47:17

by Viresh Kumar

Subject: Re: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

On 04/19/2011 12:03 PM, Koul, Vinod wrote:
> On 04/19/2011 11:53 AM, viresh kumar wrote:
>> On 04/19/2011 11:42 AM, Koul, Vinod wrote:
>>> On 04/18/2011 04:20 PM, viresh kumar wrote:
>> Vinod,
>>
>> I initiated this discussion a few days back on a separate thread:
>>
>> http://www.spinics.net/lists/arm-kernel/msg121139.html
>>
>> And so I implemented this.
> Ahhh, on the road, catching up on email...
>
> This was a dmaengine requirement, not an absolute one for slave-dma.
> But it would be a good idea to resist this. What is your reason for it?

Mainline drivers using dmaengine are already submitting new transfers from
callback routines, for example amba-pl011.
And I have to use pl011 with DMA, so I faced this issue in dw_dmac.

--
viresh
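
For reference, a hedged sketch of the client pattern being described
(hypothetical client_dev type and names, loosely modelled on the pl011
approach; not code from any of these drivers). With patch 2/6 applied,
dw_dmac drops dwc->lock around the callback, so the prep/submit below can
take the lock again safely:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

struct client_dev {				/* hypothetical client state */
	struct dma_chan *chan;
	struct scatterlist sg;			/* next buffer, already mapped */
};

static void client_dma_callback(void *param)
{
	struct client_dev *cdev = param;
	struct dma_device *dma = cdev->chan->device;
	struct dma_async_tx_descriptor *desc;

	/*
	 * With patch 2/6 applied, dw_dmac has dropped dwc->lock around
	 * this callback, so preparing and submitting the next transfer
	 * here no longer deadlocks on the channel lock.
	 */
	desc = dma->device_prep_slave_sg(cdev->chan, &cdev->sg, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;		/* out of descriptors; retry later */

	desc->callback = client_dma_callback;	/* chain the next transfer */
	desc->callback_param = cdev;
	desc->tx_submit(desc);			/* ends up in dwc_tx_submit() */
	dma->device_issue_pending(cdev->chan);
}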

2011-04-26 20:31:01

by Russell King - ARM Linux

Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On Mon, Apr 18, 2011 at 04:19:59PM +0530, Viresh Kumar wrote:
> static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
> {
> + struct dw_dma *dw = to_dw_dma(dwc->chan.device);
> struct dw_desc *desc, *_desc;
> struct dw_desc *ret = NULL;
> unsigned int i = 0;
>
> - spin_lock_bh(&dwc->lock);
> + spin_lock_irqsave(&dwc->lock, dw->flags);

It's a very bad idea to store the IRQ flags like this - it means that
another thread operating on this device corrupts this thread's interrupt flag
settings.

It should be a local variable.
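
Concretely, a minimal sketch of the pattern being asked for (free-list scan
elided): each caller keeps its own flags word on the stack, so a second
contender can no longer overwrite the holder's saved IRQ state before the
holder restores it.

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *ret = NULL;
	unsigned long flags;		/* per-caller, on this thread's stack */

	spin_lock_irqsave(&dwc->lock, flags);
	/* ... scan dwc->free_list for an ACKed descriptor, as before ... */
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}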

2011-04-26 20:31:48

by Russell King - ARM Linux

Subject: Re: [PATCH 2/6] dmaengine/dw_dmac: Enable resubmission from callback routine.

On Tue, Apr 19, 2011 at 11:42:24AM +0530, Koul, Vinod wrote:
> On 04/18/2011 04:20 PM, viresh kumar wrote:
> >
> > Resubmission of a new transfer must be allowed from callbacks. To allow this,
> > release the lock before calling the callback routine and take it again afterwards.
> Why would you like to do that?
>
> IMO it's not a good thing, as you are essentially doing it in your tasklet...
> Make sure the submit() is done by the client before this; use it to start the
> next transaction in your tasklet, and let the client know in the callback
> that the txn is finished.

No. Submission is explicitly permitted (by Dan) in slave DMA callbacks.

2011-04-26 20:33:19

by Russell King - ARM Linux

[permalink] [raw]
Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On Tue, Apr 19, 2011 at 11:55:45AM +0530, viresh kumar wrote:
> This must have been added in dw_dma_chan instead :(
> Will resend it.

Even that doesn't work. It has to be a local variable.

2011-04-27 03:47:38

by Viresh Kumar

Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On 04/27/2011 02:03 AM, Russell King - ARM Linux wrote:
> On Tue, Apr 19, 2011 at 11:55:45AM +0530, viresh kumar wrote:
>> This must have been added in dw_dma_chan instead :(
>> Will resend it.
>
> Even that doesn't work. It has to be a local variable.

I assumed that flags wouldn't be updated if the lock were already taken by some
other thread, in which case a global flags field would have worked fine.
But as I see now, that's incorrect: flags is written unconditionally, whether or
not the lock is contended.

Will resend it again.

--
viresh

2011-04-27 06:00:11

by Viresh Kumar

Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On 04/27/2011 02:03 AM, Russell King - ARM Linux wrote:
> On Tue, Apr 19, 2011 at 11:55:45AM +0530, viresh kumar wrote:
>> This must have been added in dw_dma_chan instead :(
>> Will resend it.
>
> Even that doesn't work. It has to be a local variable.

Hello,

There are two kinds of locks taken in dw_dmac: spin_lock_bh and spin_lock.
spin_lock_bh is taken in routines called from external drivers, and spin_lock is
taken inside the tasklet.
For the reasons mentioned earlier, spin_lock_bh has to be replaced with
spin_lock_irqsave.

Now, is a plain spin_lock() sufficient inside the tasklet? The dmaengine APIs
can be called from interrupt context (which will try to take spin_lock_irqsave).

I think spin_lock must be replaced with the irqsave variants even in the tasklet.
And if we do that, do we need this tasklet at all?

--
viresh

2011-04-27 08:03:44

by Russell King - ARM Linux

Subject: Re: [PATCH 1/6] dmaengine/dw_dmac: Replace spin_lock_bh with irqsave variants

On Wed, Apr 27, 2011 at 11:29:49AM +0530, viresh kumar wrote:
> There are two kinds of locks taken in dw_dmac: spin_lock_bh and spin_lock.
> spin_lock_bh is taken in routines called from external drivers, and spin_lock is
> taken inside the tasklet.
> For the reasons mentioned earlier, spin_lock_bh has to be replaced with
> spin_lock_irqsave.
>
> Now, is a plain spin_lock() sufficient inside the tasklet? The dmaengine APIs
> can be called from interrupt context (which will try to take spin_lock_irqsave).

Tasklets can be interrupted, so no.
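
To spell that out, a sketch of the interleaving (assuming the dw_dmac tasklet
and a client IRQ handler calling a dmaengine entry point such as
dwc_issue_pending() contend for the same dwc->lock; this is an illustration,
not a trace from the thread):

/*
 * Deadlock with a plain spin_lock() in the tasklet:
 *
 *   CPU0: dw_dma_tasklet()
 *             spin_lock(&dwc->lock)          - IRQs still enabled
 *   CPU0: device interrupt arrives, preempting the tasklet
 *   CPU0:     client IRQ handler -> dwc_issue_pending()
 *                 spin_lock_irqsave(&dwc->lock, flags)
 *                 spins forever: the holder is the interrupted
 *                 tasklet on this very CPU
 *
 * So the tasklet, too, has to take dwc->lock with the irqsave
 * variants - after which the tasklet is just deferred work, hence
 * the question above of whether it is still needed at all.
 */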