2011-02-26 00:23:36

by Ira W. Snyder

Subject: [PATCH 0/8] fsldma: lockup fixes

Hello everyone,

I've been chasing random, infrequent controller lockups in the fsldma
driver for a long time. I finally managed to find the problem and fix it.
I'm not quite sure about the exact sequence of events that causes the race
condition, but it is related to using the hardware registers to track the
controller state. See the patch changelogs for more detail.

The problems were quickly found by turning on DMAPOOL_DEBUG inside
mm/dmapool.c. This poisons memory allocated with the dmapool API.
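
For reference, the poisoning works roughly like this (paraphrased from
mm/dmapool.c; POOL_POISON_FREED and POOL_POISON_ALLOCATED are the poison
bytes defined there):

    /* on dma_pool_free(): stamp the block so stale reads are obvious */
    memset(vaddr, POOL_POISON_FREED, pool->size);

    /* on dma_pool_alloc(): overwrite the poison before handing it out */
    memset(retval, POOL_POISON_ALLOCATED, pool->size);

If the controller fetches a descriptor after it has been freed, it reads
the poison pattern instead of a valid link descriptor, which turns a
silent use-after-free into an immediate, visible failure.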

With dmapool poisoning turned on, the dmatest driver would start producing
failures within a few seconds. With this patchset applied, I have run the
test several times at 10 threads per channel and 100000 iterations per
thread without any problems.

I have made some changes which affect the 85xx/86xx parts. I believe that
the changes only affect features which have been unused since the rewrite
in Jan 2010. It would be very good to get a test report from an 85xx/86xx
user.

While making the previous changes, I noticed that the fsldma driver does
not implement the automatic DMA unmapping of src and dst buffers offered
by the DMAEngine API. I have added support for this feature. This also
required a fix to dmatest, which was passing incorrect flags.

The "support async_tx dependencies" patch could be split apart from the
automatic unmapping patch if it is desirable. They both touch the same
piece of code, so I thought it was ok to combine them. Let me know.

I would really like to see this go into 2.6.39. I think we can get it
reviewed before then. :)

Ira W. Snyder (8):
fsldma: move related helper functions near each other
fsldma: use channel name in printk output
fsldma: improve link descriptor debugging
fsldma: minor codingstyle and consistency fixes
fsldma: fix controller lockups
fsldma: support async_tx dependencies and automatic unmapping
dmatest: fix automatic buffer unmap type
fsldma: reduce locking during descriptor cleanup

drivers/dma/dmatest.c | 7 +-
drivers/dma/fsldma.c | 485 +++++++++++++++++++++++++-----------------------
drivers/dma/fsldma.h | 6 +-
3 files changed, 263 insertions(+), 235 deletions(-)

--
1.7.3.4


2011-02-26 00:23:37

by Ira W. Snyder

Subject: [PATCH 1/8] fsldma: move related helper functions near each other

This is a purely cosmetic cleanup. It is nice to have related functions
right next to each other in the code.

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 116 +++++++++++++++++++++++++++----------------------
1 files changed, 64 insertions(+), 52 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4de947a..2e1af45 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -39,33 +39,9 @@

static const char msg_ld_oom[] = "No free memory for link descriptor\n";

-static void dma_init(struct fsldma_chan *chan)
-{
- /* Reset the channel */
- DMA_OUT(chan, &chan->regs->mr, 0, 32);
-
- switch (chan->feature & FSL_DMA_IP_MASK) {
- case FSL_DMA_IP_85XX:
- /* Set the channel to below modes:
- * EIE - Error interrupt enable
- * EOSIE - End of segments interrupt enable (basic mode)
- * EOLNIE - End of links interrupt enable
- * BWC - Bandwidth sharing among channels
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
- | FSL_DMA_MR_EOSIE, 32);
- break;
- case FSL_DMA_IP_83XX:
- /* Set the channel to below modes:
- * EOTIE - End-of-transfer interrupt enable
- * PRC_RM - PCI read multiple
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
- | FSL_DMA_MR_PRC_RM, 32);
- break;
- }
-}
+/*
+ * Register Helpers
+ */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
@@ -77,6 +53,30 @@ static u32 get_sr(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->sr, 32);
}

+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+{
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->ndar, 64);
+}
+
+static u32 get_bcr(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->bcr, 32);
+}
+
+/*
+ * Descriptor Helpers
+ */
+
static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
@@ -113,24 +113,49 @@ static void set_desc_next(struct fsldma_chan *chan,
hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

-static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+static void set_ld_eol(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
{
- DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
-}
+ u64 snoop_bits;

-static dma_addr_t get_cdar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
-}
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;

-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->ndar, 64);
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ | snoop_bits, 64);
}

-static u32 get_bcr(struct fsldma_chan *chan)
+/*
+ * DMA Engine Hardware Control Helpers
+ */
+
+static void dma_init(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->bcr, 32);
+ /* Reset the channel */
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
+
+ switch (chan->feature & FSL_DMA_IP_MASK) {
+ case FSL_DMA_IP_85XX:
+ /* Set the channel to below modes:
+ * EIE - Error interrupt enable
+ * EOSIE - End of segments interrupt enable (basic mode)
+ * EOLNIE - End of links interrupt enable
+ * BWC - Bandwidth sharing among channels
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
+ | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
+ | FSL_DMA_MR_EOSIE, 32);
+ break;
+ case FSL_DMA_IP_83XX:
+ /* Set the channel to below modes:
+ * EOTIE - End-of-transfer interrupt enable
+ * PRC_RM - PCI read multiple
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+ | FSL_DMA_MR_PRC_RM, 32);
+ break;
+ }
}

static int dma_is_idle(struct fsldma_chan *chan)
@@ -185,19 +210,6 @@ static void dma_halt(struct fsldma_chan *chan)
dev_err(chan->dev, "DMA halt timeout!\n");
}

-static void set_ld_eol(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
- ? FSL_DMA_SNEN : 0;
-
- desc->hw.next_ln_addr = CPU_TO_DMA(chan,
- DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
- | snoop_bits, 64);
-}
-
/**
* fsl_chan_set_src_loop_size - Set source address hold transfer size
* @chan : Freescale DMA channel
--
1.7.3.4

2011-02-26 00:23:55

by Ira W. Snyder

Subject: [PATCH 6/8] fsldma: support async_tx dependencies and automatic unmapping

Prior to this patch, dma_run_dependencies() was called while holding
desc_lock. This function can call tx_submit() for other descriptors,
which may try to re-grab the lock. Avoid this by moving the descriptors
to be cleaned up onto a temporary list, and dropping the lock before
cleanup.
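
The deadlock being avoided looks roughly like this (a sketch, assuming
the dependent transaction is submitted on the same channel):

    fsl_chan_ld_cleanup()
      spin_lock_irqsave(&chan->desc_lock, flags);      /* lock taken */
      dma_run_dependencies(txd);
        fsl_dma_tx_submit(dep);                        /* dependency */
          spin_lock_irqsave(&chan->desc_lock, flags);  /* lock again: deadlock */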

At the same time, add support for automatic unmapping of src and dst
buffers, as offered by the DMAEngine API.
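
For client code, both behaviors are controlled through the standard
DMAEngine flags. A minimal sketch (hypothetical client, real flag names)
of a submitter that manages its own mappings and therefore opts out of
the automatic unmap:

    /* this client unmaps its own buffers, so skip the automatic unmap */
    unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
                          DMA_COMPL_SKIP_SRC_UNMAP |
                          DMA_COMPL_SKIP_DEST_UNMAP;
    struct dma_async_tx_descriptor *txd;

    txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);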

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 132 ++++++++++++++++++++++++++++++++++++--------------
1 files changed, 95 insertions(+), 37 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index d3c5100..4014790 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -78,6 +78,11 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}

+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+ return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@@ -88,6 +93,16 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@@ -98,6 +113,16 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@@ -796,6 +821,57 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}

/**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct device *dev = chan->common.device->dev;
+ dma_addr_t src = get_desc_src(chan, desc);
+ dma_addr_t dst = get_desc_dst(chan, desc);
+ u32 len = get_desc_cnt(chan, desc);
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p callback\n", chan->name, desc);
+#endif
+ txd->callback(txd->callback_param);
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ /* Unmap the dst buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+ }
+
+ /* Unmap the src buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
+ }
+
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p free\n", chan->name, desc);
+#endif
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
+
+/**
* fsl_chan_ld_cleanup - Clean up link descriptors
* @chan : Freescale DMA channel
*
@@ -809,57 +885,39 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc, *_desc;
const char *name = chan->name;
+ LIST_HEAD(ld_cleanup);
unsigned long flags;

spin_lock_irqsave(&chan->desc_lock, flags);

- /* if the ld_running list is empty, there is nothing to do */
- if (list_empty(&chan->ld_running)) {
- dev_dbg(chan->dev, "%s: no descriptors to cleanup\n", name);
- goto out_unlock;
+ /* update the cookie if we have some descriptors to cleanup */
+ if (!list_empty(&chan->ld_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_fsl_desc(chan->ld_running.prev);
+ cookie = desc->async_tx.cookie;
+
+ chan->completed_cookie = cookie;
+ dev_dbg(chan->dev, "%s: completed_cookie=%d\n", name, cookie);
}

/*
- * Get the last descriptor, update the cookie to it
- *
- * This is done before callbacks run so that clients can check the
- * status of their DMA transfer inside the callback.
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
*/
- desc = to_fsl_desc(chan->ld_running.prev);
- chan->completed_cookie = desc->async_tx.cookie;
- dev_dbg(chan->dev, "%s: completed_cookie = %d\n",
- name, chan->completed_cookie);
+ list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);

/* Run the callback for each descriptor, in order */
- list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
- dma_async_tx_callback callback;
- void *callback_param;
+ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

- /* Remove from the list of running transactions */
+ /* Remove from the list of transactions */
list_del(&desc->node);

- /* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "%s: LD %p callback\n", name, desc);
-#endif
- callback(callback_param);
- spin_lock_irqsave(&chan->desc_lock, flags);
- }
-
- /* Run any dependencies, then free the descriptor */
- dma_run_dependencies(&desc->async_tx);
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "%s: LD %p free\n", name, desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ /* Run all cleanup for this descriptor */
+ fsldma_cleanup_descriptor(chan, desc);
}
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
--
1.7.3.4

2011-02-26 00:23:54

by Ira W. Snyder

Subject: [PATCH 5/8] fsldma: fix controller lockups

Enabling poisoning in the dmapool API quickly showed that the DMA
controller was fetching descriptors that should no longer have been in
use. These stale fetches are what caused the intermittent controller
lockups seen during testing.

I have been unable to figure out the exact set of conditions which cause
this to happen. However, I believe it is related to the driver using the
hardware registers to track whether the controller is busy or not. The
code can incorrectly decide that the hardware is idle due to lag between
register writes and the hardware actually becoming busy.
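
My best guess at the interleaving is something like this (a sketch only;
as noted above, the exact sequence is unknown):

    /*
     *   CPU                                     hardware
     *   set_cdar(chan, desc->async_tx.phys);
     *   dma_start(chan);                        MR write still propagating
     *   ...
     *   dma_is_idle(chan) returns true          SR not yet updated
     *   set_cdar(chan, next->async_tx.phys);    CDAR clobbered mid-transfer
     */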

To fix this, the driver has been reworked to explicitly track the state
of the hardware, rather than try to guess what it is doing based on the
register values.
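
Condensed, the new pattern looks like this (a sketch distilled from the
fsl_chan_xfer_ld_queue() and dma_do_tasklet() changes below):

    /* software state decides idleness, always under desc_lock */
    spin_lock_irqsave(&chan->desc_lock, flags);
    if (chan->idle && !list_empty(&chan->ld_pending)) {
        desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
        set_cdar(chan, desc->async_tx.phys);
        dma_start(chan);
        chan->idle = false;     /* set true again by the tasklet */
    }
    spin_unlock_irqrestore(&chan->desc_lock, flags);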

This has passed dmatest with 10 threads per channel, 100000 iterations
per thread several times without error. Previously, this would fail
within a few seconds.

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 187 +++++++++++++++++++-------------------------------
drivers/dma/fsldma.h | 1 +
2 files changed, 72 insertions(+), 116 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 06421c0..d3c5100 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -63,11 +63,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->ndar, 64);
-}
-
static u32 get_bcr(struct fsldma_chan *chan)
{
return DMA_IN(chan, &chan->regs->bcr, 32);
@@ -138,13 +133,11 @@ static void dma_init(struct fsldma_chan *chan)
case FSL_DMA_IP_85XX:
/* Set the channel to below modes:
* EIE - Error interrupt enable
- * EOSIE - End of segments interrupt enable (basic mode)
* EOLNIE - End of links interrupt enable
* BWC - Bandwidth sharing among channels
*/
DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
- | FSL_DMA_MR_EOSIE, 32);
+ | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
break;
case FSL_DMA_IP_83XX:
/* Set the channel to below modes:
@@ -757,14 +750,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,

switch (cmd) {
case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
/* Halt the DMA engine */
dma_halt(chan);

- spin_lock_irqsave(&chan->desc_lock, flags);
-
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ chan->idle = true;

spin_unlock_irqrestore(&chan->desc_lock, flags);
return 0;
@@ -802,78 +796,45 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}

/**
- * fsl_dma_update_completed_cookie - Update the completed cookie.
+ * fsl_chan_ld_cleanup - Clean up link descriptors
* @chan : Freescale DMA channel
*
- * CONTEXT: hardirq
+ * This function is run after the queue of running descriptors has been
+ * executed by the DMA engine. It will run any callbacks, and then free
+ * the descriptors.
+ *
+ * HARDWARE STATE: idle
*/
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
- struct fsl_desc_sw *desc;
+ struct fsl_desc_sw *desc, *_desc;
+ const char *name = chan->name;
unsigned long flags;
- dma_cookie_t cookie;

spin_lock_irqsave(&chan->desc_lock, flags);

+ /* if the ld_running list is empty, there is nothing to do */
if (list_empty(&chan->ld_running)) {
- dev_dbg(chan->dev, "%s: no running descriptors\n", chan->name);
+ dev_dbg(chan->dev, "%s: no descriptors to cleanup\n", name);
goto out_unlock;
}

- /* Get the last descriptor, update the cookie to that */
+ /*
+ * Get the last descriptor, update the cookie to it
+ *
+ * This is done before callbacks run so that clients can check the
+ * status of their DMA transfer inside the callback.
+ */
desc = to_fsl_desc(chan->ld_running.prev);
- if (dma_is_idle(chan))
- cookie = desc->async_tx.cookie;
- else {
- cookie = desc->async_tx.cookie - 1;
- if (unlikely(cookie < DMA_MIN_COOKIE))
- cookie = DMA_MAX_COOKIE;
- }
-
- chan->completed_cookie = cookie;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
- * @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- return dma_async_is_complete(desc->async_tx.cookie,
- chan->completed_cookie,
- chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc, *_desc;
- const char *name = chan->name;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- dev_dbg(chan->dev, "%s: chan completed_cookie = %d\n",
+ chan->completed_cookie = desc->async_tx.cookie;
+ dev_dbg(chan->dev, "%s: completed_cookie = %d\n",
name, chan->completed_cookie);
+
+ /* Run the callback for each descriptor, in order */
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback;
void *callback_param;

- if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
- break;
-
/* Remove from the list of running transactions */
list_del(&desc->node);

@@ -897,6 +858,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

+out_unlock:
spin_unlock_irqrestore(&chan->desc_lock, flags);
}

@@ -904,10 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
@@ -927,23 +886,16 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
}

/*
- * The DMA controller is not idle, which means the interrupt
- * handler will start any queued transactions when it runs
- * at the end of the current transaction
+ * The DMA controller is not idle, which means that the interrupt
+ * handler will start any queued transactions when it runs after
+ * this transaction finishes
*/
- if (!dma_is_idle(chan)) {
+ if (!chan->idle) {
dev_dbg(chan->dev, "%s: DMA controller still busy\n", name);
goto out_unlock;
}

/*
- * TODO:
- * make sure the dma_halt() function really un-wedges the
- * controller as much as possible
- */
- dma_halt(chan);
-
- /*
* If there are some link descriptors which have not been
* transferred, we need to start the controller
*/
@@ -952,6 +904,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* Move all elements from the queue of pending transactions
* onto the list of running transactions
*/
+ dev_dbg(chan->dev, "%s: idle, starting controller\n", name);
desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

@@ -960,7 +913,10 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* then start the DMA transaction
*/
set_cdar(chan, desc->async_tx.phys);
+ get_cdar(chan);
+
dma_start(chan);
+ chan->idle = false;

out_unlock:
spin_unlock_irqrestore(&chan->desc_lock, flags);
@@ -985,16 +941,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- dma_cookie_t last_used;
dma_cookie_t last_complete;
+ dma_cookie_t last_used;
+ unsigned long flags;

- fsl_chan_ld_cleanup(chan);
+ spin_lock_irqsave(&chan->desc_lock, flags);

- last_used = dchan->cookie;
last_complete = chan->completed_cookie;
+ last_used = dchan->cookie;

- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);

+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}

@@ -1006,8 +964,6 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
struct fsldma_chan *chan = data;
const char *name = chan->name;
- int update_cookie = 0;
- int xfer_ld_q = 0;
u32 stat;

/* save and clear the status register */
@@ -1015,6 +971,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
set_sr(chan, stat);
dev_dbg(chan->dev, "%s: irq: stat = 0x%x\n", name, stat);

+ /* check that this was really our device */
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
@@ -1029,29 +986,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
*/
if (stat & FSL_DMA_SR_PE) {
dev_dbg(chan->dev, "%s: irq: Programming Error INT\n", name);
- if (get_bcr(chan) == 0) {
- /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
- * Now, update the completed cookie, and continue the
- * next uncompleted transfer.
- */
- update_cookie = 1;
- xfer_ld_q = 1;
- }
stat &= ~FSL_DMA_SR_PE;
- }
-
- /*
- * If the link descriptor segment transfer finishes,
- * we will recycle the used descriptor.
- */
- if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(chan->dev, "%s: irq: End-of-segments INT\n", name);
- dev_dbg(chan->dev, "%s: irq: clndar 0x%llx, nlndar 0x%llx\n",
- name,
- (unsigned long long)get_cdar(chan),
- (unsigned long long)get_ndar(chan));
- stat &= ~FSL_DMA_SR_EOSI;
- update_cookie = 1;
+ if (get_bcr(chan) != 0)
+ dev_err(chan->dev, "%s: Programming Error!\n", name);
}

/*
@@ -1061,8 +998,6 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
if (stat & FSL_DMA_SR_EOCDI) {
dev_dbg(chan->dev, "%s: irq: End-of-Chain link INT\n", name);
stat &= ~FSL_DMA_SR_EOCDI;
- update_cookie = 1;
- xfer_ld_q = 1;
}

/*
@@ -1073,25 +1008,44 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
if (stat & FSL_DMA_SR_EOLNI) {
dev_dbg(chan->dev, "%s: irq: End-of-link INT\n", name);
stat &= ~FSL_DMA_SR_EOLNI;
- xfer_ld_q = 1;
}

- if (update_cookie)
- fsl_dma_update_completed_cookie(chan);
- if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(chan);
+ /* check that the DMA controller is really idle */
+ if (!dma_is_idle(chan))
+ dev_err(chan->dev, "%s: irq: controller not idle!\n", name);
+
+ /* check that we handled all of the bits */
if (stat)
- dev_dbg(chan->dev, "%s: irq: unhandled sr 0x%02x\n", name, stat);
+ dev_err(chan->dev, "%s: irq: unhandled sr 0x%02x\n", name, stat);

- dev_dbg(chan->dev, "%s: irq: Exit\n", name);
+ /*
+ * Schedule the tasklet to handle all cleanup of the current
+ * transaction. It will start a new transaction if there is
+ * one pending.
+ */
tasklet_schedule(&chan->tasklet);
+ dev_dbg(chan->dev, "%s: irq: Exit\n", name);
return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ unsigned long flags;
+
+ dev_dbg(chan->dev, "%s: tasklet entry\n", chan->name);
+
+ /* run all callbacks, free all used descriptors */
fsl_chan_ld_cleanup(chan);
+
+ /* the channel is now idle */
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ chan->idle = true;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* start any pending transactions automatically */
+ fsl_chan_xfer_ld_queue(chan);
+ dev_dbg(chan->dev, "%s: tasklet exit\n", chan->name);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
@@ -1274,6 +1228,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
+ chan->idle = true;

chan->common.device = &fdev->common;

diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 49189da..9cb5aa5 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -148,6 +148,7 @@ struct fsldma_chan {
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
+ bool idle; /* DMA controller is idle */

void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
--
1.7.3.4

2011-02-26 00:23:52

by Ira W. Snyder

Subject: [PATCH 8/8] fsldma: reduce locking during descriptor cleanup

This merges the fsl_chan_ld_cleanup() function into the dma_do_tasklet()
function to reduce locking overhead. In the best case, we will be able
to keep the DMA controller busy while we are freeing used descriptors.
In all cases, the spinlock is taken two fewer times per transaction than
before.
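
For a single completed transaction driven by the interrupt handler, the
lock is now taken roughly as follows (a sketch of the before/after, based
on the code paths touched below):

    /* before: three lock/unlock pairs per tasklet run */
    fsl_chan_ld_cleanup();          /* lock #1: run callbacks, free LDs */
    /* mark the channel idle */     /* lock #2 */
    fsl_chan_xfer_ld_queue();       /* lock #3: start next transaction */

    /* after: one lock/unlock pair per tasklet run */
    spin_lock_irqsave(&chan->desc_lock, flags);
    /* splice out finished LDs, mark idle, start the next transaction */
    spin_unlock_irqrestore(&chan->desc_lock, flags);
    /* run callbacks and free descriptors with the lock dropped */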

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 114 +++++++++++++++++++++----------------------------
1 files changed, 49 insertions(+), 65 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4014790..3dc27a9 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -872,67 +872,16 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
}

/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function is run after the queue of running descriptors has been
- * executed by the DMA engine. It will run any callbacks, and then free
- * the descriptors.
- *
- * HARDWARE STATE: idle
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc, *_desc;
- const char *name = chan->name;
- LIST_HEAD(ld_cleanup);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->ld_running)) {
- dma_cookie_t cookie;
-
- desc = to_fsl_desc(chan->ld_running.prev);
- cookie = desc->async_tx.cookie;
-
- chan->completed_cookie = cookie;
- dev_dbg(chan->dev, "%s: completed_cookie=%d\n", name, cookie);
- }
-
- /*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
- */
- list_splice_tail_init(&chan->ld_running, &ld_cleanup);
-
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- /* Run the callback for each descriptor, in order */
- list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
- /* Remove from the list of transactions */
- list_del(&desc->node);
-
- /* Run all cleanup for this descriptor */
- fsldma_cleanup_descriptor(chan, desc);
- }
-}
-
-/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
* HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
const char *name = chan->name;
struct fsl_desc_sw *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);

/*
* If the list of pending descriptors is empty, then we
@@ -940,7 +889,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
*/
if (list_empty(&chan->ld_pending)) {
dev_dbg(chan->dev, "%s: no pending LDs\n", name);
- goto out_unlock;
+ return;
}

/*
@@ -950,7 +899,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
*/
if (!chan->idle) {
dev_dbg(chan->dev, "%s: DMA controller still busy\n", name);
- goto out_unlock;
+ return;
}

/*
@@ -975,9 +924,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)

dma_start(chan);
chan->idle = false;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
@@ -987,7 +933,11 @@ out_unlock:
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
@@ -1089,21 +1039,55 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ struct fsl_desc_sw *desc, *_desc;
+ const char *name = chan->name;
+ LIST_HEAD(ld_cleanup);
unsigned long flags;

- dev_dbg(chan->dev, "%s: tasklet entry\n", chan->name);
+ dev_dbg(chan->dev, "%s: tasklet entry\n", name);

- /* run all callbacks, free all used descriptors */
- fsl_chan_ld_cleanup(chan);
-
- /* the channel is now idle */
spin_lock_irqsave(&chan->desc_lock, flags);
+
+ /* update the cookie if we have some descriptors to cleanup */
+ if (!list_empty(&chan->ld_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_fsl_desc(chan->ld_running.prev);
+ cookie = desc->async_tx.cookie;
+
+ chan->completed_cookie = cookie;
+ dev_dbg(chan->dev, "%s: completed_cookie=%d\n", name, cookie);
+ }
+
+ /*
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
+ */
+ list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+ /* the hardware is now idle and ready for more */
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);

- /* start any pending transactions automatically */
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
fsl_chan_xfer_ld_queue(chan);
- dev_dbg(chan->dev, "%s: tasklet exit\n", chan->name);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /* Run all cleanup for this descriptor */
+ fsldma_cleanup_descriptor(chan, desc);
+ }
+
+ dev_dbg(chan->dev, "%s: tasklet exit\n", name);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
--
1.7.3.4

2011-02-26 00:23:51

by Ira W. Snyder

Subject: [PATCH 7/8] dmatest: fix automatic buffer unmap type

The dmatest code relies on the DMAEngine API to automatically call
dma_unmap_single() on src buffers, but the flags it passes are
incorrect. Fix them.
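
For reference, dmatest maps its source buffers with dma_map_single()
(paraphrased from dmatest_func()):

    dma_srcs[i] = dma_map_single(dev->dev, buf, len, DMA_TO_DEVICE);

so the automatic unmap must be told to use dma_unmap_single() via
DMA_COMPL_SRC_UNMAP_SINGLE; otherwise a driver implementing the
DMAEngine unmap convention falls back to dma_unmap_page() on a
single-mapped buffer.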

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/dmatest.c | 7 ++++++-
1 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358..7e1b0aa 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -285,7 +285,12 @@ static int dmatest_func(void *data)

set_user_nice(current, 10);

- flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
+ /*
+ * src buffers are freed by the DMAEngine code with dma_unmap_single()
+ * dst buffers are freed by ourselves below
+ */
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
+ | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
--
1.7.3.4

2011-02-26 00:24:43

by Ira W. Snyder

[permalink] [raw]
Subject: [PATCH 4/8] fsldma: minor codingstyle and consistency fixes

This fixes some minor violations of the coding style. It also changes
the style of the device_prep_dma_*() function definitions so they are
identical.

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 29 +++++++++++++----------------
drivers/dma/fsldma.h | 4 ++--
2 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 851993c..06421c0 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -84,7 +84,7 @@ static void set_desc_cnt(struct fsldma_chan *chan,
}

static void set_desc_src(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t src)
+ struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;

@@ -94,7 +94,7 @@ static void set_desc_src(struct fsldma_chan *chan,
}

static void set_desc_dst(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dst)
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;

@@ -104,7 +104,7 @@ static void set_desc_dst(struct fsldma_chan *chan,
}

static void set_desc_next(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t next)
+ struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;

@@ -113,8 +113,7 @@ static void set_desc_next(struct fsldma_chan *chan,
hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

-static void set_ld_eol(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
u64 snoop_bits;

@@ -333,8 +332,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

-static void append_ld_queue(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

@@ -375,8 +373,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
- if (cookie < 0)
- cookie = 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;

child->async_tx.cookie = cookie;
}
@@ -397,8 +395,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
*
* Return - The descriptor allocated. NULL for failed.
*/
-static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsldma_chan *chan)
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
const char *name = chan->name;
struct fsl_desc_sw *desc;
@@ -423,7 +420,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
return desc;
}

-
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @chan : Freescale DMA channel
@@ -534,14 +530,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &new->tx_list);

- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);

return &new->async_tx;
}

-static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
struct fsldma_chan *chan;
@@ -591,7 +588,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;

- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);

return &first->async_tx;
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 113e713..49189da 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
} __attribute__((aligned(32)));

struct fsldma_chan_regs {
- u32 mr; /* 0x00 - Mode Register */
- u32 sr; /* 0x04 - Status Register */
+ u32 mr; /* 0x00 - Mode Register */
+ u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
u64 sar; /* 0x10 - Source Address Register */
u64 dar; /* 0x18 - Destination Address Register */
--
1.7.3.4

2011-02-26 00:24:58

by Ira W. Snyder

Subject: [PATCH 2/8] fsldma: use channel name in printk output

This makes debugging the driver much easier when multiple channels are
running concurrently. In addition, since each channel's descriptor pool
is now named after the channel, the dmapool statistics exported in sysfs
show how much descriptor memory each channel has allocated.

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 60 +++++++++++++++++++++++++++----------------------
drivers/dma/fsldma.h | 1 +
2 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2e1af45..6e3d3d7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,7 +37,7 @@

#include "fsldma.h"

-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
* Register Helpers
@@ -207,7 +207,7 @@ static void dma_halt(struct fsldma_chan *chan)
}

if (!dma_is_idle(chan))
- dev_err(chan->dev, "DMA halt timeout!\n");
+ dev_err(chan->dev, "%s: DMA halt timeout!\n", chan->name);
}

/**
@@ -400,12 +400,13 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
struct fsldma_chan *chan)
{
+ const char *name = chan->name;
struct fsl_desc_sw *desc;
dma_addr_t pdesc;

desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) {
- dev_dbg(chan->dev, "out of memory for link desc\n");
+ dev_dbg(chan->dev, "%s: out of memory for link desc\n", name);
return NULL;
}

@@ -439,13 +440,12 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
* We need the descriptor to be aligned to 32bytes
* for meeting FSL DMA specification requirement.
*/
- chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- chan->dev,
+ chan->desc_pool = dma_pool_create(chan->name, chan->dev,
sizeof(struct fsl_desc_sw),
__alignof__(struct fsl_desc_sw), 0);
if (!chan->desc_pool) {
- dev_err(chan->dev, "unable to allocate channel %d "
- "descriptor pool\n", chan->id);
+ dev_err(chan->dev, "%s: unable to allocate descriptor pool\n",
+ chan->name);
return -ENOMEM;
}

@@ -491,7 +491,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
struct fsldma_chan *chan = to_fsl_chan(dchan);
unsigned long flags;

- dev_dbg(chan->dev, "Free all channel resources.\n");
+ dev_dbg(chan->dev, "%s: Free all channel resources.\n", chan->name);
spin_lock_irqsave(&chan->desc_lock, flags);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
@@ -514,7 +514,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)

new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ dev_err(chan->dev, "%s: %s\n", chan->name, msg_ld_oom);
return NULL;
}

@@ -551,11 +551,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ dev_err(chan->dev, "%s: %s\n", chan->name, msg_ld_oom);
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "%s: new link desc alloc %p\n", chan->name, new);
#endif

copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
@@ -639,11 +639,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
/* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ dev_err(chan->dev, "%s: %s\n", chan->name, msg_ld_oom);
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "%s: new link desc alloc %p\n", chan->name, new);
#endif

set_desc_cnt(chan, &new->hw, len);
@@ -815,7 +815,7 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
spin_lock_irqsave(&chan->desc_lock, flags);

if (list_empty(&chan->ld_running)) {
- dev_dbg(chan->dev, "no running descriptors\n");
+ dev_dbg(chan->dev, "%s: no running descriptors\n", chan->name);
goto out_unlock;
}

@@ -859,11 +859,13 @@ static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc, *_desc;
+ const char *name = chan->name;
unsigned long flags;

spin_lock_irqsave(&chan->desc_lock, flags);

- dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+ dev_dbg(chan->dev, "%s: chan completed_cookie = %d\n",
+ name, chan->completed_cookie);
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback;
void *callback_param;
@@ -879,7 +881,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
callback_param = desc->async_tx.callback_param;
if (callback) {
spin_unlock_irqrestore(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "LD %p callback\n", desc);
+ dev_dbg(chan->dev, "%s: LD %p callback\n", name, desc);
callback(callback_param);
spin_lock_irqsave(&chan->desc_lock, flags);
}
@@ -903,6 +905,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
+ const char *name = chan->name;
struct fsl_desc_sw *desc;
unsigned long flags;

@@ -913,7 +916,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* don't need to do any work at all
*/
if (list_empty(&chan->ld_pending)) {
- dev_dbg(chan->dev, "no pending LDs\n");
+ dev_dbg(chan->dev, "%s: no pending LDs\n", name);
goto out_unlock;
}

@@ -923,7 +926,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* at the end of the current transaction
*/
if (!dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
+ dev_dbg(chan->dev, "%s: DMA controller still busy\n", name);
goto out_unlock;
}

@@ -996,6 +999,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
struct fsldma_chan *chan = data;
+ const char *name = chan->name;
int update_cookie = 0;
int xfer_ld_q = 0;
u32 stat;
@@ -1003,14 +1007,14 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
/* save and clear the status register */
stat = get_sr(chan);
set_sr(chan, stat);
- dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+ dev_dbg(chan->dev, "%s: irq: stat = 0x%x\n", name, stat);

stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;

if (stat & FSL_DMA_SR_TE)
- dev_err(chan->dev, "Transfer Error!\n");
+ dev_err(chan->dev, "%s: Transfer Error!\n", name);

/*
* Programming Error
@@ -1018,7 +1022,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* triger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(chan->dev, "irq: Programming Error INT\n");
+ dev_dbg(chan->dev, "%s: irq: Programming Error INT\n", name);
if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the
@@ -1035,8 +1039,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(chan->dev, "irq: End-of-segments INT\n");
- dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
+ dev_dbg(chan->dev, "%s: irq: End-of-segments INT\n", name);
+ dev_dbg(chan->dev, "%s: irq: clndar 0x%llx, nlndar 0x%llx\n",
+ name,
(unsigned long long)get_cdar(chan),
(unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI;
@@ -1048,7 +1053,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* and start the next transfer if it exist.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+ dev_dbg(chan->dev, "%s: irq: End-of-Chain link INT\n", name);
stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1;
xfer_ld_q = 1;
@@ -1060,7 +1065,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* prepare next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(chan->dev, "irq: End-of-link INT\n");
+ dev_dbg(chan->dev, "%s: irq: End-of-link INT\n", name);
stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1;
}
@@ -1070,9 +1075,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
if (xfer_ld_q)
fsl_chan_xfer_ld_queue(chan);
if (stat)
- dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+ dev_dbg(chan->dev, "%s: irq: unhandled sr 0x%02x\n", name, stat);

- dev_dbg(chan->dev, "irq: Exit\n");
+ dev_dbg(chan->dev, "%s: irq: Exit\n", name);
tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
@@ -1242,6 +1247,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,

fdev->chan[chan->id] = chan;
tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

/* Initialize the channel */
dma_init(chan);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403..113e713 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -135,6 +135,7 @@ struct fsldma_device {
#define FSL_DMA_CHAN_START_EXT 0x00002000

struct fsldma_chan {
+ char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
--
1.7.3.4

2011-02-26 00:24:57

by Ira W. Snyder

Subject: [PATCH 3/8] fsldma: improve link descriptor debugging

This adds debug output for link descriptor allocations, callbacks, and
frees, making it much easier to track down errors involving link
descriptors.

Signed-off-by: Ira W. Snyder <[email protected]>
---
drivers/dma/fsldma.c | 21 +++++++++++++++------
1 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6e3d3d7..851993c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -416,6 +416,10 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;

+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p allocated\n", chan->name, desc);
+#endif
+
return desc;
}

@@ -467,6 +471,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,

list_for_each_entry_safe(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p free\n", chan->name, desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -478,6 +485,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,

list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p free\n", chan->name, desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -554,9 +564,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
dev_err(chan->dev, "%s: %s\n", chan->name, msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "%s: new link desc alloc %p\n", chan->name, new);
-#endif

copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

@@ -642,9 +649,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
dev_err(chan->dev, "%s: %s\n", chan->name, msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "%s: new link desc alloc %p\n", chan->name, new);
-#endif

set_desc_cnt(chan, &new->hw, len);
set_desc_src(chan, &new->hw, src);
@@ -881,13 +885,18 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
callback_param = desc->async_tx.callback_param;
if (callback) {
spin_unlock_irqrestore(&chan->desc_lock, flags);
+#ifdef FSL_DMA_LD_DEBUG
dev_dbg(chan->dev, "%s: LD %p callback\n", name, desc);
+#endif
callback(callback_param);
spin_lock_irqsave(&chan->desc_lock, flags);
}

/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(chan->dev, "%s: LD %p free\n", name, desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

--
1.7.3.4