2024-02-28 04:30:21

by Sagar, Vishal

Subject: [PATCH v2 0/2] Xilinx DPDMA fixes and cyclic dma mode support

This patch series fixes a race condition in the vsync IRQ handler and
adds support for cyclic DMA mode. Cyclic DMA mode is added in
preparation for adding audio streaming support.
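
As background, here is a minimal sketch of how a client driver (for
example, an audio driver) could consume this mode through the generic
dmaengine API. The device, the "aud0" channel name, and the function
below are hypothetical and purely illustrative:

#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Illustrative sketch only (not part of this series): submit a cyclic
 * MEM_TO_DEV transfer of buf_len bytes split into equal periods. The
 * descriptor callback would fire once per completed period.
 */
static int audio_start_cyclic(struct device *dev, dma_addr_t buf,
			      size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "aud0");	/* hypothetical channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	return 0;
}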

v2:
- 1/2:
  - Fixed as per Laurent's comment in
    https://lore.kernel.org/lkml/YftWeLCpAfN%[email protected]/
  - Took the lock in one more place

Neel Gandhi (1):
dmaengine: xilinx: dpdma: Fix race condition in vsync IRQ

Rohit Visavalia (1):
dmaengine: xilinx: dpdma: Add support for cyclic dma mode

drivers/dma/xilinx/xilinx_dpdma.c | 107 +++++++++++++++++++++++++++++-
1 file changed, 105 insertions(+), 2 deletions(-)

--
2.25.1



2024-02-28 04:32:00

by Sagar, Vishal

Subject: [PATCH v2 2/2] dmaengine: xilinx: dpdma: Add support for cyclic dma mode

From: Rohit Visavalia <[email protected]>

This patch adds support for DPDMA cyclic DMA mode; cyclic DMA
transfers are required for audio streaming.

Signed-off-by: Rohit Visavalia <[email protected]>
Signed-off-by: Radhey Shyam Pandey <[email protected]>
Signed-off-by: Vishal Sagar <[email protected]>

---
drivers/dma/xilinx/xilinx_dpdma.c | 97 +++++++++++++++++++++++++++++++
1 file changed, 97 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 28d9af8f00f0..88ad2f35538a 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -669,6 +669,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
 	kfree(desc);
 }
 
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of a single period in bytes
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+			      dma_addr_t buf_addr, size_t buf_len,
+			      size_t period_len, unsigned long flags)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+	unsigned int periods = buf_len / period_len;
+	unsigned int i;
+
+	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+	if (!tx_desc)
+		return (void *)tx_desc;
+
+	for (i = 0; i < periods; i++) {
+		struct xilinx_dpdma_hw_desc *hw_desc;
+
+		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+			dev_err(chan->xdev->dev,
+				"buffer should be aligned at %d B\n",
+				XILINX_DPDMA_ALIGN_BYTES);
+			goto error;
+		}
+
+		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+		if (!sw_desc)
+			goto error;
+
+		xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+						   &buf_addr, 1);
+		hw_desc = &sw_desc->hw;
+		hw_desc->xfer_size = period_len;
+		hw_desc->hsize_stride =
+			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+				   period_len) |
+			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+				   period_len);
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+		buf_addr += period_len;
+		last = sw_desc;
+	}
+
+	sw_desc = list_first_entry(&tx_desc->descriptors,
+				   struct xilinx_dpdma_sw_desc, node);
+	last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+	if (chan->xdev->ext_addr)
+		last->hw.addr_ext |=
+			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+				   upper_32_bits(sw_desc->dma_addr));
+
+	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+	return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+	xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+	return NULL;
+}
+
 /**
  * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
  * descriptor
@@ -1190,6 +1268,23 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
 /* -----------------------------------------------------------------------------
  * DMA Engine Operations
  */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+			     size_t buf_len, size_t period_len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	if (direction != DMA_MEM_TO_DEV)
+		return NULL;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+				period_len, flags);
+}

 static struct dma_async_tx_descriptor *
 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1673,6 +1768,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
 	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
 	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
 	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
@@ -1680,6 +1776,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 
 	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
 	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
 	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
 	/* TODO: Can we achieve better granularity ? */
 	ddev->device_tx_status = dma_cookie_status;
--
2.25.1
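
A note on the descriptor topology (a conceptual sketch, not the DPDMA
hardware layout): xilinx_dpdma_chan_prep_cyclic() above builds one
hardware descriptor per period and then closes the chain into a ring,
so the engine keeps looping over the buffer until the channel is
terminated. The types and names below are illustrative only:

#include <linux/kernel.h>
#include <linux/types.h>

/* Simplified stand-in for a hardware descriptor: each entry carries
 * the bus address of its successor, which is what forms the chain.
 */
struct ring_desc {
	u32 next_desc;	/* low 32 bits of the next descriptor's bus address */
	u32 xfer_size;	/* bytes transferred for this period */
};

static void close_descriptor_ring(struct ring_desc *descs,
				  dma_addr_t first_dma, unsigned int periods)
{
	/* While the chain is built, descs[i].next_desc points at
	 * descs[i + 1]; making the transfer cyclic is simply linking
	 * the tail back to the head's bus address.
	 */
	descs[periods - 1].next_desc = lower_32_bits(first_dma);
}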


2024-03-13 07:19:29

by Tomi Valkeinen

Subject: Re: [PATCH v2 2/2] dmaengine: xilinx: dpdma: Add support for cyclic dma mode

On 28/02/2024 06:21, Vishal Sagar wrote:
> From: Rohit Visavalia <[email protected]>
>
> This patch adds support for DPDMA cyclic DMA mode; cyclic DMA
> transfers are required for audio streaming.
>
> Signed-off-by: Rohit Visavalia <[email protected]>
> Signed-off-by: Radhey Shyam Pandey <[email protected]>
> Signed-off-by: Vishal Sagar <[email protected]>
>
> ---
> drivers/dma/xilinx/xilinx_dpdma.c | 97 +++++++++++++++++++++++++++++++
> 1 file changed, 97 insertions(+)

This works fine with the DP audio series I posted, so:

Tested-by: Tomi Valkeinen <[email protected]>

Tomi


2024-03-27 14:57:33

by Tomi Valkeinen

Subject: Re: [PATCH v2 2/2] dmaengine: xilinx: dpdma: Add support for cyclic dma mode

On 28/02/2024 06:21, Vishal Sagar wrote:
> From: Rohit Visavalia <[email protected]>
>
> This patch adds support for DPDMA cyclic DMA mode; cyclic DMA
> transfers are required for audio streaming.
>
> Signed-off-by: Rohit Visavalia <[email protected]>
> Signed-off-by: Radhey Shyam Pandey <[email protected]>
> Signed-off-by: Vishal Sagar <[email protected]>
>
> ---
> drivers/dma/xilinx/xilinx_dpdma.c | 97 +++++++++++++++++++++++++++++++
> 1 file changed, 97 insertions(+)
>
> diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
> index 28d9af8f00f0..88ad2f35538a 100644
> --- a/drivers/dma/xilinx/xilinx_dpdma.c
> +++ b/drivers/dma/xilinx/xilinx_dpdma.c
> @@ -669,6 +669,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
>  	kfree(desc);
>  }
>
> +/**
> + * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
> + * @chan: DPDMA channel
> + * @buf_addr: buffer address
> + * @buf_len: buffer length
> + * @period_len: length of a single period in bytes
> + * @flags: tx flags argument passed in to prepare function
> + *
> + * Prepare a tx descriptor including internal software/hardware descriptors
> + * for the given cyclic transaction.
> + *
> + * Return: A dma async tx descriptor on success, or NULL.
> + */
> +static struct dma_async_tx_descriptor *
> +xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
> +			      dma_addr_t buf_addr, size_t buf_len,
> +			      size_t period_len, unsigned long flags)
> +{
> +	struct xilinx_dpdma_tx_desc *tx_desc;
> +	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
> +	unsigned int periods = buf_len / period_len;
> +	unsigned int i;
> +
> +	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
> +	if (!tx_desc)
> +		return (void *)tx_desc;

Just return NULL here?

> +
> +	for (i = 0; i < periods; i++) {
> +		struct xilinx_dpdma_hw_desc *hw_desc;
> +
> +		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
> +			dev_err(chan->xdev->dev,
> +				"buffer should be aligned at %d B\n",
> +				XILINX_DPDMA_ALIGN_BYTES);
> +			goto error;
> +		}
> +
> +		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
> +		if (!sw_desc)
> +			goto error;
> +
> +		xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
> +						   &buf_addr, 1);
> +		hw_desc = &sw_desc->hw;
> +		hw_desc->xfer_size = period_len;
> +		hw_desc->hsize_stride =
> +			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
> +				   period_len) |
> +			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
> +				   period_len);
> +		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
> +		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
> +		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;

You could:

hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
		    XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
		    XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;

Although... Shouldn't control always be 0 here, so you can just
hw_desc->control = ...;

> +
> +		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
> +
> +		buf_addr += period_len;
> +		last = sw_desc;
> +	}
> +
> +	sw_desc = list_first_entry(&tx_desc->descriptors,
> +				   struct xilinx_dpdma_sw_desc, node);
> +	last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
> +	if (chan->xdev->ext_addr)
> +		last->hw.addr_ext |=
> +			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
> +				   upper_32_bits(sw_desc->dma_addr));
> +
> +	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
> +
> +	return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
> +
> +error:
> +	xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
> +
> +	return NULL;
> +}
> +
>  /**
>   * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
>   * descriptor
> @@ -1190,6 +1268,23 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
>  /* -----------------------------------------------------------------------------
>   * DMA Engine Operations
>   */
> +static struct dma_async_tx_descriptor *
> +xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
> +			     size_t buf_len, size_t period_len,
> +			     enum dma_transfer_direction direction,
> +			     unsigned long flags)
> +{
> +	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
> +
> +	if (direction != DMA_MEM_TO_DEV)
> +		return NULL;
> +
> +	if (buf_len % period_len)
> +		return NULL;
> +
> +	return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
> +				period_len, flags);

The parameters should be aligned above.

> +}
>
>  static struct dma_async_tx_descriptor *
>  xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
> @@ -1673,6 +1768,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
>
>  	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
>  	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
> +	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
>  	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
>  	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
>  	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
> @@ -1680,6 +1776,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
>
>  	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
>  	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
> +	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
>  	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
>  	/* TODO: Can we achieve better granularity ? */
>  	ddev->device_tx_status = dma_cookie_status;

While I'm not too familiar with dma engines, this looks fine to me. So,
other than the few cosmetic comments:

Reviewed-by: Tomi Valkeinen <[email protected]>

Tomi