Subject: [PATCH v3 0/3] dmaengine: Add clock support for AXI DMAs

This patch series adds basic clock support for AXI DMAs.
It is created on top of the
"dmaengine: vdma: AXI DMA's enhancments" patch series.

Kedareswara rao Appana (3):
dmaengine: vdma: Add config structure to differentiate dmas
Documentation: DT: vdma: Add clock support for dmas
dmaengine: vdma: Add clock support

.../devicetree/bindings/dma/xilinx/xilinx_vdma.txt | 15 +
drivers/dma/xilinx/xilinx_vdma.c | 304 ++++++++++++++++++---
2 files changed, 287 insertions(+), 32 deletions(-)

--
2.1.2


Subject: [PATCH v3 2/3] Documentation: DT: vdma: Add clock support for dmas

This patch updates the binding doc with the clock description
for AXI DMAs.

Acked-by: Sören Brinkmann <[email protected]>
Signed-off-by: Kedareswara rao Appana <[email protected]>
---
Changes for v3:
---> Added clock support for all the AXI DMAs.
Changes for v2:
--> Listed all the clocks supported by the h/w,
as suggested by Datta.
--> Used IP clock names instead of shortcut clock names

.../devicetree/bindings/dma/xilinx/xilinx_vdma.txt | 15 +++++++++++++++
1 file changed, 15 insertions(+)

diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
index fcc2b65..a1f2683 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
@@ -21,6 +21,18 @@ Required properties:
- dma-channel child node: Should have at least one channel and can have up to
two channels per device. This node specifies the properties of each
DMA channel (see child node properties below).
+- clocks: Input clock specifier. Refer to common clock bindings.
+- clock-names: List of input clocks
+ For VDMA:
+ Required elements: "s_axi_lite_aclk"
+ Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+ "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
+ For CDMA:
+ Required elements: "s_axi_lite_aclk", "m_axi_aclk"
+ For AXIDMA:
+ Required elements: "s_axi_lite_aclk"
+ Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+ "m_axi_sg_aclk"

Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
@@ -60,6 +72,9 @@ axi_vdma_0: axivdma@40030000 {
xlnx,num-fstores = <0x8>;
xlnx,flush-fsync = <0x1>;
xlnx,addrwidth = <0x20>;
+ clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
+ clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+ "m_axis_mm2s_aclk", "s_axis_s2mm_aclk";
dma-channel@40030000 {
compatible = "xlnx,axi-vdma-mm2s-channel";
interrupts = < 0 54 4 >;
--
2.1.2

Subject: [PATCH v3 3/3] dmaengine: vdma: Add clock support

Added basic clock support for AXI DMAs.
The clocks are requested at probe and released at remove.

Reviewed-by: Shubhrajyoti Datta <[email protected]>
Signed-off-by: Kedareswara rao Appana <[email protected]>
---
Changes for v3:
---> Added clock support for all the AXI DMAs.
---> Fixed a clk_unprepare leak during probe failure, as suggested by Moritz.
---> Addressed the review comment: the driver now specifically asks for the
clocks it needs and returns an error if a mandatory clock is missing, as
suggested by Soren.
Changes for v2:
---> None.

drivers/dma/xilinx/xilinx_vdma.c | 225 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 223 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 5bfaa32..41bb5b3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -44,6 +44,7 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
+#include <linux/clk.h>

#include "../dmaengine.h"

@@ -344,6 +345,9 @@ struct xilinx_dma_chan {

struct dma_config {
enum xdma_ip_type dmatype;
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk);
};

/**
@@ -365,7 +369,13 @@ struct xilinx_dma_device {
bool has_sg;
u32 flush_on_fsync;
bool ext_addr;
+ struct platform_device *pdev;
const struct dma_config *dma_config;
+ struct clk *axi_clk;
+ struct clk *tx_clk;
+ struct clk *txs_clk;
+ struct clk *rx_clk;
+ struct clk *rxs_clk;
};

/* Macros */
@@ -1757,6 +1767,200 @@ static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
list_del(&chan->common.device_node);
}

+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **rx_clk,
+ struct clk **sg_clk, struct clk **tmp_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(*sg_clk))
+ *sg_clk = NULL;
+
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **dev_clk, struct clk **tmp_clk,
+ struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+ *tmp1_clk = NULL;
+ *tmp2_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+ if (IS_ERR(*dev_clk))
+ *dev_clk = NULL;
+
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*dev_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+
+ return 0;
+
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk)
+{
+ int err;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(*txs_clk))
+ *txs_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(*rxs_clk))
+ *rxs_clk = NULL;
+
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*txs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txsclk;
+ }
+
+ err = clk_prepare_enable(*rxs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+ clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+ if (!IS_ERR(xdev->rxs_clk))
+ clk_disable_unprepare(xdev->rxs_clk);
+ if (!IS_ERR(xdev->rx_clk))
+ clk_disable_unprepare(xdev->rx_clk);
+ if (!IS_ERR(xdev->txs_clk))
+ clk_disable_unprepare(xdev->txs_clk);
+ if (!IS_ERR(xdev->tx_clk))
+ clk_disable_unprepare(xdev->tx_clk);
+ clk_disable_unprepare(xdev->axi_clk);
+}
+
/**
* xilinx_dma_chan_probe - Per Channel Probing
* It get channel features from the device tree entry and
@@ -1900,14 +2104,17 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,

static const struct dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
+ .clk_init = axidma_clk_init,
};

static const struct dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
+ .clk_init = axicdma_clk_init,
};

static const struct dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
+ .clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
@@ -1926,9 +2133,13 @@ MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
*/
static int xilinx_dma_probe(struct platform_device *pdev)
{
+ int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+ struct clk **, struct clk **, struct clk **)
+ = axivdma_clk_init;
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
+ struct clk *axi_clk, *tx_clk, *txs_clk, *rx_clk, *rxs_clk;
struct resource *io;
u32 num_frames, addr_width;
int i, err;
@@ -1943,10 +2154,16 @@ static int xilinx_dma_probe(struct platform_device *pdev)
const struct of_device_id *match;

match = of_match_node(xilinx_dma_of_ids, np);
- if (match && match->data)
+ if (match && match->data) {
xdev->dma_config = match->data;
+ clk_init = xdev->dma_config->clk_init;
+ }
}

+ err = clk_init(pdev, &axi_clk, &tx_clk, &txs_clk, &rx_clk, &rxs_clk);
+ if (err)
+ return err;
+
/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xdev->regs = devm_ioremap_resource(&pdev->dev, io);
@@ -2019,7 +2236,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
err = xilinx_dma_chan_probe(xdev, child);
if (err < 0)
- goto error;
+ goto disable_clks;
}

if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2043,6 +2260,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)

return 0;

+disable_clks:
+ xdma_disable_allclks(xdev);
error:
for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
if (xdev->chan[i])
@@ -2070,6 +2289,8 @@ static int xilinx_dma_remove(struct platform_device *pdev)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);

+ xdma_disable_allclks(xdev);
+
return 0;
}

--
2.1.2

Subject: [PATCH v3 1/3] dmaengine: vdma: Add config structure to differentiate dmas

This patch adds a config structure to the driver to differentiate between
the AXI DMAs and to add more features (clock support etc.) to these DMAs.

Signed-off-by: Kedareswara rao Appana <[email protected]>
---
Changes for v3:
---> New patch.

drivers/dma/xilinx/xilinx_vdma.c | 81 +++++++++++++++++++++++++---------------
1 file changed, 50 insertions(+), 31 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 70caea6..5bfaa32 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -342,6 +342,10 @@ struct xilinx_dma_chan {
void (*start_transfer)(struct xilinx_dma_chan *chan);
};

+struct dma_config {
+ enum xdma_ip_type dmatype;
+};
+
/**
* struct xilinx_dma_device - DMA device structure
* @regs: I/O mapped base address
@@ -361,7 +365,7 @@ struct xilinx_dma_device {
bool has_sg;
u32 flush_on_fsync;
bool ext_addr;
- enum xdma_ip_type dmatype;
+ const struct dma_config *dma_config;
};

/* Macros */
@@ -573,12 +577,12 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
if (!desc)
return;

- if (chan->xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
list_for_each_entry_safe(segment, next, &desc->segments, node) {
list_del(&segment->node);
xilinx_vdma_free_tx_segment(chan, segment);
}
- } else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
list_for_each_entry_safe(cdma_segment, cdma_next,
&desc->segments, node) {
list_del(&cdma_segment->node);
@@ -641,7 +645,7 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
dev_dbg(chan->dev, "Free all channel resources.\n");

xilinx_dma_free_descriptors(chan);
- if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA)
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
xilinx_dma_free_tx_segment(chan, chan->seg_v);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -711,13 +715,13 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* We need the descriptor to be aligned to 64bytes
* for meeting Xilinx VDMA specification requirement.
*/
- if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
chan->dev,
sizeof(struct xilinx_axidma_tx_segment),
__alignof__(struct xilinx_axidma_tx_segment),
0);
- } else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
sizeof(struct xilinx_cdma_tx_segment),
@@ -738,7 +742,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}

- if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA)
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
/*
* For AXI DMA case after submitting a pending_list, keep
* an extra segment allocated so that the "next descriptor"
@@ -751,7 +755,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)

dma_cookie_init(dchan);

- if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/* For AXI DMA resetting once channel will reset the
* other channel as well so enable the interrupts here.
*/
@@ -759,7 +763,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
XILINX_DMA_DMAXR_ALL_IRQ_MASK);
}

- if ((chan->xdev->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
XILINX_CDMA_CR_SGMODE);

@@ -790,7 +794,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE || !txstate)
return ret;

- if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
spin_lock_irqsave(&chan->lock, flags);

desc = list_last_entry(&chan->active_list,
@@ -1332,12 +1336,12 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
*/
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
- if (chan->xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment,
node);
tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
cdma_tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_cdma_tx_segment,
node);
@@ -1357,8 +1361,8 @@ append:
list_add_tail(&desc->node, &chan->pending_list);
chan->desc_pendingcount++;

- if (chan->has_sg && (chan->xdev->dmatype == XDMA_TYPE_VDMA) &&
- unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+ && unlikely(chan->desc_pendingcount > chan->num_frms)) {
dev_dbg(chan->dev, "desc pendingcount is too high\n");
chan->desc_pendingcount = chan->num_frms;
}
@@ -1811,7 +1815,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->id = 0;

chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
- if (xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
@@ -1824,7 +1828,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->id = 1;

chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
- if (xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
@@ -1845,9 +1849,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return err;
}

- if (xdev->dmatype == XDMA_TYPE_AXIDMA)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
chan->start_transfer = xilinx_dma_start_transfer;
- else if (xdev->dmatype == XDMA_TYPE_CDMA)
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
chan->start_transfer = xilinx_cdma_start_transfer;
else
chan->start_transfer = xilinx_vdma_start_transfer;
@@ -1894,13 +1898,22 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

+static const struct dma_config axidma_config = {
+ .dmatype = XDMA_TYPE_AXIDMA,
+};
+
+static const struct dma_config axicdma_config = {
+ .dmatype = XDMA_TYPE_CDMA,
+};
+
+static const struct dma_config axivdma_config = {
+ .dmatype = XDMA_TYPE_VDMA,
+};
+
static const struct of_device_id xilinx_dma_of_ids[] = {
- { .compatible = "xlnx,axi-dma-1.00.a",
- .data = (void *)XDMA_TYPE_AXIDMA },
- { .compatible = "xlnx,axi-cdma-1.00.a",
- .data = (void *)XDMA_TYPE_CDMA },
- { .compatible = "xlnx,axi-vdma-1.00.a",
- .data = (void *)XDMA_TYPE_VDMA },
+ { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+ { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+ { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -1915,7 +1928,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
- struct device_node *child;
+ struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
u32 num_frames, addr_width;
int i, err;
@@ -1926,7 +1939,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return -ENOMEM;

xdev->dev = &pdev->dev;
- xdev->dmatype = (enum xdma_ip_type)of_device_get_match_data(&pdev->dev);
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xilinx_dma_of_ids, np);
+ if (match && match->data)
+ xdev->dma_config = match->data;
+ }

/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1937,7 +1956,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Retrieve the DMA engine properties from the device tree */
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");

- if (xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
&num_frames);
if (err < 0) {
@@ -1969,7 +1988,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.dev = &pdev->dev;

INIT_LIST_HEAD(&xdev->common.channels);
- if (!(xdev->dmatype == XDMA_TYPE_CDMA)) {
+ if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
}
@@ -1981,12 +2000,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_terminate_all = xilinx_dma_terminate_all;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
- if (xdev->dmatype == XDMA_TYPE_AXIDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
/* Residue calculation is supported by only AXI DMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
- } else if (xdev->dmatype == XDMA_TYPE_CDMA) {
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
} else {
@@ -2003,7 +2022,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
goto error;
}

- if (xdev->dmatype == XDMA_TYPE_VDMA) {
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
if (xdev->chan[i])
xdev->chan[i]->num_frms = num_frames;
--
2.1.2

2016-04-21 16:24:05

by Soren Brinkmann

Subject: Re: [PATCH v3 3/3] dmaengine: vdma: Add clock support

On Thu, 2016-04-21 at 16:08:38 +0530, Kedareswara rao Appana wrote:
> Added basic clock support for axi dma's.
> The clocks are requested at probe and released at remove.
>
> Reviewed-by: Shubhrajyoti Datta <[email protected]>
> Signed-off-by: Kedareswara rao Appana <[email protected]>
> ---
> Changes for v3:
> ---> Added clock support for all the AXI DMA's.
> ---> Fixed clk_unprepare leak during probe fail as suggested by Moritz.
> ---> Fixed comment driver specifically asks for the clocks it needs and return
> an error if a mandatory clock is missing as suggested by soren.
> Changes for v2:
> ---> None.
>
> drivers/dma/xilinx/xilinx_vdma.c | 225 ++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 223 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
> index 5bfaa32..41bb5b3 100644
> --- a/drivers/dma/xilinx/xilinx_vdma.c
> +++ b/drivers/dma/xilinx/xilinx_vdma.c
> @@ -44,6 +44,7 @@
> #include <linux/of_platform.h>
> #include <linux/of_irq.h>
> #include <linux/slab.h>
> +#include <linux/clk.h>
>
> #include "../dmaengine.h"
>
> @@ -344,6 +345,9 @@ struct xilinx_dma_chan {
>
> struct dma_config {
> enum xdma_ip_type dmatype;
> + int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
> + struct clk **tx_clk, struct clk **txs_clk,
> + struct clk **rx_clk, struct clk **rxs_clk);
> };
>
> /**
> @@ -365,7 +369,13 @@ struct xilinx_dma_device {
> bool has_sg;
> u32 flush_on_fsync;
> bool ext_addr;
> + struct platform_device *pdev;
> const struct dma_config *dma_config;
> + struct clk *axi_clk;
> + struct clk *tx_clk;
> + struct clk *txs_clk;
> + struct clk *rx_clk;
> + struct clk *rxs_clk;
> };

the struct members could be documented
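E.g., extending the struct's kernel-doc with something like this (just a
sketch; descriptions inferred from the clock names in the binding):

	 * @pdev: Platform device structure pointer
	 * @axi_clk: DMA AXI4-Lite interface clock
	 * @tx_clk: DMA mm2s clock
	 * @txs_clk: DMA mm2s stream clock
	 * @rx_clk: DMA s2mm clock
	 * @rxs_clk: DMA s2mm stream clock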

>
> /* Macros */
> @@ -1757,6 +1767,200 @@ static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
> list_del(&chan->common.device_node);
> }
>
> +static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> + struct clk **tx_clk, struct clk **rx_clk,
> + struct clk **sg_clk, struct clk **tmp_clk)
> +{
> + int err;
> +
> + *tmp_clk = NULL;
> +
> + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> + if (IS_ERR(*axi_clk)) {
> + err = PTR_ERR(*axi_clk);
> + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> + return err;
> + }
> +
> + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> + if (IS_ERR(*tx_clk))
> + *tx_clk = NULL;
> +
> + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> + if (IS_ERR(*rx_clk))
> + *rx_clk = NULL;
> +
> + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
> + if (IS_ERR(*sg_clk))
> + *sg_clk = NULL;
> +
> +
> + err = clk_prepare_enable(*axi_clk);

Should this be called if you know that the pointer might be NULL?

> + if (err) {
> + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> + return err;
> + }
> +
> + err = clk_prepare_enable(*tx_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> + goto err_disable_axiclk;
> + }
> +
> + err = clk_prepare_enable(*rx_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
> + goto err_disable_txclk;
> + }
> +
> + err = clk_prepare_enable(*sg_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
> + goto err_disable_rxclk;
> + }
> +
> + return 0;
> +
> +err_disable_rxclk:
> + clk_disable_unprepare(*rx_clk);
> +err_disable_txclk:
> + clk_disable_unprepare(*tx_clk);
> +err_disable_axiclk:
> + clk_disable_unprepare(*axi_clk);
> +
> + return err;
> +}
> +
> +static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> + struct clk **dev_clk, struct clk **tmp_clk,
> + struct clk **tmp1_clk, struct clk **tmp2_clk)
> +{
> + int err;
> +
> + *tmp_clk = NULL;
> + *tmp1_clk = NULL;
> + *tmp2_clk = NULL;
> +
> + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> + if (IS_ERR(*axi_clk)) {
> + err = PTR_ERR(*axi_clk);
> + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> + return err;
> + }
> +
> + *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
> + if (IS_ERR(*dev_clk))
> + *dev_clk = NULL;

This is a required clock according to the binding, but a failure is ignored
here.
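Something along these lines (untested sketch) would make it mandatory:

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}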

> +
> +
> + err = clk_prepare_enable(*axi_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> + return err;
> + }
> +
> + err = clk_prepare_enable(*dev_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> + goto err_disable_axiclk;
> + }
> +
> +
> + return 0;
> +
> +err_disable_axiclk:
> + clk_disable_unprepare(*axi_clk);
> +
> + return err;
> +}
> +
> +static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> + struct clk **tx_clk, struct clk **txs_clk,
> + struct clk **rx_clk, struct clk **rxs_clk)
> +{
> + int err;
> +
> + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> + if (IS_ERR(*axi_clk)) {
> + err = PTR_ERR(*axi_clk);
> + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> + return err;
> + }
> +
> + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> + if (IS_ERR(*tx_clk))
> + *tx_clk = NULL;
> +
> + *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
> + if (IS_ERR(*txs_clk))
> + *txs_clk = NULL;
> +
> + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> + if (IS_ERR(*rx_clk))
> + *rx_clk = NULL;
> +
> + *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
> + if (IS_ERR(*rxs_clk))
> + *rxs_clk = NULL;
> +
> +
> + err = clk_prepare_enable(*axi_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> + return err;
> + }
> +
> + err = clk_prepare_enable(*tx_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> + goto err_disable_axiclk;
> + }
> +
> + err = clk_prepare_enable(*txs_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
> + goto err_disable_txclk;
> + }
> +
> + err = clk_prepare_enable(*rx_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
> + goto err_disable_txsclk;
> + }
> +
> + err = clk_prepare_enable(*rxs_clk);
> + if (err) {
> + dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
> + goto err_disable_rxclk;
> + }
> +
> + return 0;
> +
> +err_disable_rxclk:
> + clk_disable_unprepare(*rx_clk);
> +err_disable_txsclk:
> + clk_disable_unprepare(*txs_clk);
> +err_disable_txclk:
> + clk_disable_unprepare(*tx_clk);
> +err_disable_axiclk:
> + clk_disable_unprepare(*axi_clk);
> +
> + return err;
> +}
> +
> +static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
> +{
> + if (!IS_ERR(xdev->rxs_clk))

The init functions set the optional clocks to NULL if not present. So,
these pointers should be valid or NULL, but not an error pointer (I
think NULL is not considered an error pointer, as there is an
IS_ERR_OR_NULL macro).
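Since clk_disable_unprepare() itself accepts NULL clocks, the whole helper
could shrink to something like this (untested sketch):

	static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
	{
		/* optional clocks are NULL here, which clk_disable_unprepare() handles */
		clk_disable_unprepare(xdev->rxs_clk);
		clk_disable_unprepare(xdev->rx_clk);
		clk_disable_unprepare(xdev->txs_clk);
		clk_disable_unprepare(xdev->tx_clk);
		clk_disable_unprepare(xdev->axi_clk);
	}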

> + clk_disable_unprepare(xdev->rxs_clk);
> + if (!IS_ERR(xdev->rx_clk))
> + clk_disable_unprepare(xdev->rx_clk);
> + if (!IS_ERR(xdev->txs_clk))
> + clk_disable_unprepare(xdev->txs_clk);
> + if (!IS_ERR(xdev->tx_clk))
> + clk_disable_unprepare(xdev->tx_clk);
> + clk_disable_unprepare(xdev->axi_clk);
> +}
> +
> /**
> * xilinx_dma_chan_probe - Per Channel Probing
> * It get channel features from the device tree entry and
> @@ -1900,14 +2104,17 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
>
> static const struct dma_config axidma_config = {
> .dmatype = XDMA_TYPE_AXIDMA,
> + .clk_init = axidma_clk_init,
> };
>
> static const struct dma_config axicdma_config = {
> .dmatype = XDMA_TYPE_CDMA,
> + .clk_init = axicdma_clk_init,
> };
>
> static const struct dma_config axivdma_config = {
> .dmatype = XDMA_TYPE_VDMA,
> + .clk_init = axivdma_clk_init,
> };
>
> static const struct of_device_id xilinx_dma_of_ids[] = {
> @@ -1926,9 +2133,13 @@ MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
> */
> static int xilinx_dma_probe(struct platform_device *pdev)
> {
> + int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
> + struct clk **, struct clk **, struct clk **)
> + = axivdma_clk_init;
> struct device_node *node = pdev->dev.of_node;
> struct xilinx_dma_device *xdev;
> struct device_node *child, *np = pdev->dev.of_node;
> + struct clk *axi_clk, *tx_clk, *txs_clk, *rx_clk, *rxs_clk;

Are these local vars ever transferred into the struct xilinx_dma_device?
(I actually think you can directly use the struct members instead of
these locals.)
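I.e., the call could populate the struct members directly, something like
this (untested sketch):

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

so that xdma_disable_allclks() later sees the same pointers.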

> struct resource *io;
> u32 num_frames, addr_width;
> int i, err;
> @@ -1943,10 +2154,16 @@ static int xilinx_dma_probe(struct platform_device *pdev)
> const struct of_device_id *match;
>
> match = of_match_node(xilinx_dma_of_ids, np);
> - if (match && match->data)
> + if (match && match->data) {
> xdev->dma_config = match->data;
> + clk_init = xdev->dma_config->clk_init;
> + }
> }
>
> + err = clk_init(pdev, &axi_clk, &tx_clk, &txs_clk, &rx_clk, &rxs_clk);
> + if (err)
> + return err;
> +
> /* Request and map I/O memory */
> io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> xdev->regs = devm_ioremap_resource(&pdev->dev, io);
> @@ -2019,7 +2236,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
> for_each_child_of_node(node, child) {
> err = xilinx_dma_chan_probe(xdev, child);
> if (err < 0)
> - goto error;
> + goto disable_clks;
> }
>
> if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
> @@ -2043,6 +2260,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
>
> return 0;
>
> +disable_clks:
> + xdma_disable_allclks(xdev);
> error:
> for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
> if (xdev->chan[i])
> @@ -2070,6 +2289,8 @@ static int xilinx_dma_remove(struct platform_device *pdev)
> if (xdev->chan[i])
> xilinx_dma_chan_remove(xdev->chan[i]);
>
> + xdma_disable_allclks(xdev);
> +
> return 0;
> }

Sören

Subject: RE: [PATCH v3 3/3] dmaengine: vdma: Add clock support

Hi Soren,

> -----Original Message-----
> From: Sören Brinkmann [mailto:[email protected]]
> Sent: Thursday, April 21, 2016 9:52 PM
> To: Appana Durga Kedareswara Rao <[email protected]>
> Cc: [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; Michal Simek
> <[email protected]>; [email protected]; [email protected];
> Appana Durga Kedareswara Rao <[email protected]>;
> [email protected]; [email protected];
> [email protected]; Anirudha Sarangi <[email protected]>; Punnaiah
> Choudary Kalluri <[email protected]>; Shubhrajyoti Datta
> <[email protected]>; [email protected]; linux-arm-
> [email protected]; [email protected];
> [email protected]
> Subject: Re: [PATCH v3 3/3] dmaengine: vdma: Add clock support
>
> On Thu, 2016-04-21 at 16:08:38 +0530, Kedareswara rao Appana wrote:
> > Added basic clock support for axi dma's.
> > The clocks are requested at probe and released at remove.
> >
> > Reviewed-by: Shubhrajyoti Datta <[email protected]>
> > Signed-off-by: Kedareswara rao Appana <[email protected]>
> > ---
> > Changes for v3:
> > ---> Added clock support for all the AXI DMA's.
> > ---> Fixed clk_unprepare leak during probe fail as suggested by Moritz.
> > ---> Fixed comment driver specifically asks for the clocks it needs
> > ---> and return
> > an error if a mandatory clock is missing as suggested by soren.
> > Changes for v2:
> > ---> None.
> >
> > drivers/dma/xilinx/xilinx_vdma.c | 225
> > ++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 223 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/dma/xilinx/xilinx_vdma.c
> > b/drivers/dma/xilinx/xilinx_vdma.c
> > index 5bfaa32..41bb5b3 100644
> > --- a/drivers/dma/xilinx/xilinx_vdma.c
> > +++ b/drivers/dma/xilinx/xilinx_vdma.c
> > @@ -44,6 +44,7 @@
> > #include <linux/of_platform.h>
> > #include <linux/of_irq.h>
> > #include <linux/slab.h>
> > +#include <linux/clk.h>
> >
> > #include "../dmaengine.h"
> >
> > @@ -344,6 +345,9 @@ struct xilinx_dma_chan {
> >
> > struct dma_config {
> > enum xdma_ip_type dmatype;
> > + int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
> > + struct clk **tx_clk, struct clk **txs_clk,
> > + struct clk **rx_clk, struct clk **rxs_clk);
> > };
> >
> > /**
> > @@ -365,7 +369,13 @@ struct xilinx_dma_device {
> > bool has_sg;
> > u32 flush_on_fsync;
> > bool ext_addr;
> > + struct platform_device *pdev;
> > const struct dma_config *dma_config;
> > + struct clk *axi_clk;
> > + struct clk *tx_clk;
> > + struct clk *txs_clk;
> > + struct clk *rx_clk;
> > + struct clk *rxs_clk;
> > };
>
> the struct members could be documented

Ok, will document in the next version...

>
> >
> > /* Macros */
> > @@ -1757,6 +1767,200 @@ static void xilinx_dma_chan_remove(struct
> xilinx_dma_chan *chan)
> > list_del(&chan->common.device_node);
> > }
> >
> > +static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> > + struct clk **tx_clk, struct clk **rx_clk,
> > + struct clk **sg_clk, struct clk **tmp_clk) {
> > + int err;
> > +
> > + *tmp_clk = NULL;
> > +
> > + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> > + if (IS_ERR(*axi_clk)) {
> > + err = PTR_ERR(*axi_clk);
> > + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> > + if (IS_ERR(*tx_clk))
> > + *tx_clk = NULL;
> > +
> > + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> > + if (IS_ERR(*rx_clk))
> > + *rx_clk = NULL;
> > +
> > + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
> > + if (IS_ERR(*sg_clk))
> > + *sg_clk = NULL;
> > +
> > +
> > + err = clk_prepare_enable(*axi_clk);
>
> Should this be called if you know that the pointer might be NULL?

It is a mandatory clock, and if this clk is not there in the DT I am already returning an error...
I didn't get your question; could you please elaborate?

>
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + err = clk_prepare_enable(*tx_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> > + goto err_disable_axiclk;
> > + }
> > +
> > + err = clk_prepare_enable(*rx_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
> > + goto err_disable_txclk;
> > + }
> > +
> > + err = clk_prepare_enable(*sg_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
> > + goto err_disable_rxclk;
> > + }
> > +
> > + return 0;
> > +
> > +err_disable_rxclk:
> > + clk_disable_unprepare(*rx_clk);
> > +err_disable_txclk:
> > + clk_disable_unprepare(*tx_clk);
> > +err_disable_axiclk:
> > + clk_disable_unprepare(*axi_clk);
> > +
> > + return err;
> > +}
> > +
> > +static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> > + struct clk **dev_clk, struct clk **tmp_clk,
> > + struct clk **tmp1_clk, struct clk **tmp2_clk) {
> > + int err;
> > +
> > + *tmp_clk = NULL;
> > + *tmp1_clk = NULL;
> > + *tmp2_clk = NULL;
> > +
> > + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> > + if (IS_ERR(*axi_clk)) {
> > + err = PTR_ERR(*axi_clk);
> > + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
> > + if (IS_ERR(*dev_clk))
> > + *dev_clk = NULL;
>
> This is a required clock according to binding but a failure is ignored here.

Hmm, nice catch, will fix in the next version...

>
> > +
> > +
> > + err = clk_prepare_enable(*axi_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + err = clk_prepare_enable(*dev_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> > + goto err_disable_axiclk;
> > + }
> > +
> > +
> > + return 0;
> > +
> > +err_disable_axiclk:
> > + clk_disable_unprepare(*axi_clk);
> > +
> > + return err;
> > +}
> > +
> > +static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> > + struct clk **tx_clk, struct clk **txs_clk,
> > + struct clk **rx_clk, struct clk **rxs_clk) {
> > + int err;
> > +
> > + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> > + if (IS_ERR(*axi_clk)) {
> > + err = PTR_ERR(*axi_clk);
> > + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> > + if (IS_ERR(*tx_clk))
> > + *tx_clk = NULL;
> > +
> > + *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
> > + if (IS_ERR(*txs_clk))
> > + *txs_clk = NULL;
> > +
> > + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> > + if (IS_ERR(*rx_clk))
> > + *rx_clk = NULL;
> > +
> > + *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
> > + if (IS_ERR(*rxs_clk))
> > + *rxs_clk = NULL;
> > +
> > +
> > + err = clk_prepare_enable(*axi_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
> > + return err;
> > + }
> > +
> > + err = clk_prepare_enable(*tx_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
> > + goto err_disable_axiclk;
> > + }
> > +
> > + err = clk_prepare_enable(*txs_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
> > + goto err_disable_txclk;
> > + }
> > +
> > + err = clk_prepare_enable(*rx_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
> > + goto err_disable_txsclk;
> > + }
> > +
> > + err = clk_prepare_enable(*rxs_clk);
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
> > + goto err_disable_rxclk;
> > + }
> > +
> > + return 0;
> > +
> > +err_disable_rxclk:
> > + clk_disable_unprepare(*rx_clk);
> > +err_disable_txsclk:
> > + clk_disable_unprepare(*txs_clk);
> > +err_disable_txclk:
> > + clk_disable_unprepare(*tx_clk);
> > +err_disable_axiclk:
> > + clk_disable_unprepare(*axi_clk);
> > +
> > + return err;
> > +}
> > +
> > +static void xdma_disable_allclks(struct xilinx_dma_device *xdev) {
> > + if (!IS_ERR(xdev->rxs_clk))
>
> The init functions set the optional clocks to NULL if not present. So, these
> pointers should be valid or NULL, but not an error pointer (I think NULL is not
> considered an error pointer as there is a IS_ERR_OR_NULL macro).

Ok, will remove the IS_ERR checks...

>
> > + clk_disable_unprepare(xdev->rxs_clk);
> > + if (!IS_ERR(xdev->rx_clk))
> > + clk_disable_unprepare(xdev->rx_clk);
> > + if (!IS_ERR(xdev->txs_clk))
> > + clk_disable_unprepare(xdev->txs_clk);
> > + if (!IS_ERR(xdev->tx_clk))
> > + clk_disable_unprepare(xdev->tx_clk);
> > + clk_disable_unprepare(xdev->axi_clk);
> > +}
> > +
> > /**
> > * xilinx_dma_chan_probe - Per Channel Probing
> > * It get channel features from the device tree entry and @@ -1900,14
> > +2104,17 @@ static struct dma_chan *of_dma_xilinx_xlate(struct
> > of_phandle_args *dma_spec,
> >
> > static const struct dma_config axidma_config = {
> > .dmatype = XDMA_TYPE_AXIDMA,
> > + .clk_init = axidma_clk_init,
> > };
> >
> > static const struct dma_config axicdma_config = {
> > .dmatype = XDMA_TYPE_CDMA,
> > + .clk_init = axicdma_clk_init,
> > };
> >
> > static const struct dma_config axivdma_config = {
> > .dmatype = XDMA_TYPE_VDMA,
> > + .clk_init = axivdma_clk_init,
> > };
> >
> > static const struct of_device_id xilinx_dma_of_ids[] = { @@ -1926,9
> > +2133,13 @@ MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
> > */
> > static int xilinx_dma_probe(struct platform_device *pdev) {
> > + int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
> > + struct clk **, struct clk **, struct clk **)
> > + = axivdma_clk_init;
> > struct device_node *node = pdev->dev.of_node;
> > struct xilinx_dma_device *xdev;
> > struct device_node *child, *np = pdev->dev.of_node;
> > + struct clk *axi_clk, *tx_clk, *txs_clk, *rx_clk, *rxs_clk;
>
> Are these local vars ever transferred into the struct xilinx_dma_device (I actually
> think you can directly use the struct instead of these locals).

Ok, will fix in the next version...

Regards,
Kedar.

>
> > struct resource *io;
> > u32 num_frames, addr_width;
> > int i, err;
> > @@ -1943,10 +2154,16 @@ static int xilinx_dma_probe(struct
> platform_device *pdev)
> > const struct of_device_id *match;
> >
> > match = of_match_node(xilinx_dma_of_ids, np);
> > - if (match && match->data)
> > + if (match && match->data) {
> > xdev->dma_config = match->data;
> > + clk_init = xdev->dma_config->clk_init;
> > + }
> > }
> >
> > + err = clk_init(pdev, &axi_clk, &tx_clk, &txs_clk, &rx_clk, &rxs_clk);
> > + if (err)
> > + return err;
> > +
> > /* Request and map I/O memory */
> > io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> > xdev->regs = devm_ioremap_resource(&pdev->dev, io); @@ -2019,7
> > +2236,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
> > for_each_child_of_node(node, child) {
> > err = xilinx_dma_chan_probe(xdev, child);
> > if (err < 0)
> > - goto error;
> > + goto disable_clks;
> > }
> >
> > if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2043,6
> > +2260,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
> >
> > return 0;
> >
> > +disable_clks:
> > + xdma_disable_allclks(xdev);
> > error:
> > for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
> > if (xdev->chan[i])
> > @@ -2070,6 +2289,8 @@ static int xilinx_dma_remove(struct
> platform_device *pdev)
> > if (xdev->chan[i])
> > xilinx_dma_chan_remove(xdev->chan[i]);
> >
> > + xdma_disable_allclks(xdev);
> > +
> > return 0;
> > }
>
> Sören

2016-04-21 17:04:30

by Soren Brinkmann

Subject: Re: [PATCH v3 3/3] dmaengine: vdma: Add clock support

On Thu, 2016-04-21 at 09:32:44 -0700, Appana Durga Kedareswara Rao wrote:
> Hi Soren,
>
> >
> > On Thu, 2016-04-21 at 16:08:38 +0530, Kedareswara rao Appana wrote:
[...]
> > > @@ -1757,6 +1767,200 @@ static void xilinx_dma_chan_remove(struct
> > xilinx_dma_chan *chan)
> > > list_del(&chan->common.device_node);
> > > }
> > >
> > > +static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
> > > + struct clk **tx_clk, struct clk **rx_clk,
> > > + struct clk **sg_clk, struct clk **tmp_clk) {
> > > + int err;
> > > +
> > > + *tmp_clk = NULL;
> > > +
> > > + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> > > + if (IS_ERR(*axi_clk)) {
> > > + err = PTR_ERR(*axi_clk);
> > > + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> > > + return err;
> > > + }
> > > +
> > > + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> > > + if (IS_ERR(*tx_clk))
> > > + *tx_clk = NULL;
> > > +
> > > + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> > > + if (IS_ERR(*rx_clk))
> > > + *rx_clk = NULL;
> > > +
> > > + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
> > > + if (IS_ERR(*sg_clk))
> > > + *sg_clk = NULL;
> > > +
> > > +
> > > + err = clk_prepare_enable(*axi_clk);
> >
> > Should this be called if you know that the pointer might be NULL?
>
> It is a mandatory clock and if this clk is not there in DT I am already returning error...
> I didn't get your question could you please elaborate???

But this applies to all the optional clocks. They could all be NULL and you're calling
clk_prepare_enable with a NULL pointer. That function is nice enough to
do a NULL check for you, but I wonder whether these calls should happen at
all when you already know that the pointer is not a valid clock.
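E.g., guarding the optional ones explicitly (untested sketch):

	if (*tx_clk) {
		err = clk_prepare_enable(*tx_clk);
		if (err) {
			dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
			goto err_disable_axiclk;
		}
	}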

Sören

Subject: RE: [PATCH v3 3/3] dmaengine: vdma: Add clock support

Hi Soren,

> -----Original Message-----
> From: Sören Brinkmann [mailto:[email protected]]
> Sent: Thursday, April 21, 2016 10:32 PM
> To: Appana Durga Kedareswara Rao <[email protected]>
> Cc: Soren Brinkmann <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; Michal Simek <[email protected]>;
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; Anirudha
> Sarangi <[email protected]>; Punnaiah Choudary Kalluri
> <[email protected]>; Shubhrajyoti Datta <[email protected]>;
> [email protected]; [email protected]; linux-
> [email protected]; [email protected]
> Subject: Re: [PATCH v3 3/3] dmaengine: vdma: Add clock support
>
> On Thu, 2016-04-21 at 09:32:44 -0700, Appana Durga Kedareswara Rao wrote:
> > Hi Soren,
> >
> > >
> > > On Thu, 2016-04-21 at 16:08:38 +0530, Kedareswara rao Appana wrote:
> [...]
> > > > @@ -1757,6 +1767,200 @@ static void xilinx_dma_chan_remove(struct
> > > xilinx_dma_chan *chan)
> > > > list_del(&chan->common.device_node);
> > > > }
> > > >
> > > > +static int axidma_clk_init(struct platform_device *pdev, struct clk
> **axi_clk,
> > > > + struct clk **tx_clk, struct clk **rx_clk,
> > > > + struct clk **sg_clk, struct clk **tmp_clk) {
> > > > + int err;
> > > > +
> > > > + *tmp_clk = NULL;
> > > > +
> > > > + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
> > > > + if (IS_ERR(*axi_clk)) {
> > > > + err = PTR_ERR(*axi_clk);
> > > > + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
> > > > + return err;
> > > > + }
> > > > +
> > > > + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
> > > > + if (IS_ERR(*tx_clk))
> > > > + *tx_clk = NULL;
> > > > +
> > > > + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
> > > > + if (IS_ERR(*rx_clk))
> > > > + *rx_clk = NULL;
> > > > +
> > > > + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
> > > > + if (IS_ERR(*sg_clk))
> > > > + *sg_clk = NULL;
> > > > +
> > > > +
> > > > + err = clk_prepare_enable(*axi_clk);
> > >
> > > Should this be called if you know that the pointer might be NULL?
> >
> > It is a mandatory clock and if this clk is not there in DT I am already returning
> error...
> > I didn't get your question could you please elaborate???
>
> But for all the optional clocks. They could all be NULL and you're calling
> clk_prepare_enable with a NULL pointer. That function is nice enough to
> do a NULL check for you, but I wonder whether these calls should happen at
> all when you already know that the pointer is not a valid clock.

I referred to the macb driver http://lxr.free-electrons.com/source/drivers/net/ethernet/cadence/macb.c
There tx_clk is optional, and in that driver they call clk_prepare_enable() for the optional clocks.
Please let me know if you are ok with calling clk_prepare_enable() for optional clocks with a NULL pointer.
Will fix the rest of the comments and send the next version of the patch...

Regards,
Kedar.

>
> Sören