2021-06-02 17:26:11

by Mehta, Sanju

Subject: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource

From: Sanjay R Mehta <[email protected]>

Register the ptdma queue with the Linux dmaengine framework as a
general-purpose DMA channel.

Signed-off-by: Sanjay R Mehta <[email protected]>
---
drivers/dma/ptdma/Kconfig | 2 +
drivers/dma/ptdma/Makefile | 2 +-
drivers/dma/ptdma/ptdma-dev.c | 32 +++
drivers/dma/ptdma/ptdma-dmaengine.c | 460 ++++++++++++++++++++++++++++++++++++
drivers/dma/ptdma/ptdma.h | 31 +++
5 files changed, 526 insertions(+), 1 deletion(-)
create mode 100644 drivers/dma/ptdma/ptdma-dmaengine.c

diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
index e6b8ca1..b430edd 100644
--- a/drivers/dma/ptdma/Kconfig
+++ b/drivers/dma/ptdma/Kconfig
@@ -2,6 +2,8 @@
config AMD_PTDMA
tristate "AMD PassThru DMA Engine"
depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Enable support for the AMD PTDMA controller. This controller
provides DMA capabilities to perform high bandwidth memory to
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/ptdma/Makefile
index 320fa82..a528cb0 100644
--- a/drivers/dma/ptdma/Makefile
+++ b/drivers/dma/ptdma/Makefile
@@ -5,6 +5,6 @@

obj-$(CONFIG_AMD_PTDMA) += ptdma.o

-ptdma-objs := ptdma-dev.o
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o

ptdma-$(CONFIG_PCI) += ptdma-pci.o
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 4617550..7122933 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -124,6 +124,26 @@ static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_int_enable);
}

+static void pt_do_cmd_complete(unsigned long data)
+{
+ struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+ struct pt_cmd *cmd = tdata->cmd;
+ struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+ u32 tail;
+
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ if (cmd_q->cmd_error) {
+ /*
+ * Log the error and flush the queue by
+ * moving the head pointer
+ */
+ pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+ iowrite32(tail, cmd_q->reg_head_lo);
+ }
+
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
struct pt_device *pt = data;
@@ -147,6 +167,7 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
}

pt_core_enable_queue_interrupts(pt);
+ pt_do_cmd_complete((ulong)&pt->tdata);

return IRQ_HANDLED;
}
@@ -246,8 +267,16 @@ int pt_core_init(struct pt_device *pt)

pt_core_enable_queue_interrupts(pt);

+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ goto e_dmaengine;
+
return 0;

+e_dmaengine:
+ free_irq(pt->pt_irq, pt);
+
e_dma_alloc:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

@@ -264,6 +293,9 @@ void pt_core_destroy(struct pt_device *pt)
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
struct pt_cmd *cmd;

+ /* Unregister the DMA engine */
+ pt_dmaengine_unregister(pt);
+
/* Disable and clear interrupts */
pt_core_disable_queue_interrupts(pt);

diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
new file mode 100644
index 0000000..c67a1ab
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <[email protected]>
+ * Author: Gary R Hook <[email protected]>
+ */
+
+#include "ptdma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_cmd_resources(struct pt_device *pt,
+ struct list_head *list)
+{
+ struct pt_dma_cmd *cmd, *ctmp;
+
+ list_for_each_entry_safe(cmd, ctmp, list, entry) {
+ list_del(&cmd->entry);
+ kmem_cache_free(pt->dma_cmd_cache, cmd);
+ }
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ pt_free_cmd_resources(pt, &desc->cmdlist);
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
+static int pt_issue_next_cmd(struct pt_dma_desc *desc)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_dma_cmd *cmd;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd = list_first_entry(&desc->cmdlist, struct pt_dma_cmd, entry);
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &cmd->pt_cmd;
+ pt = pt_cmd->pt;
+ cmd_q = &pt->cmd_q;
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static void pt_free_active_cmd(struct pt_dma_desc *desc)
+{
+ struct pt_dma_cmd *cmd = NULL;
+
+ if (desc->issued_to_hw)
+ cmd = list_first_entry_or_null(&desc->cmdlist, struct pt_dma_cmd,
+ entry);
+ if (!cmd)
+ return;
+
+ list_del(&cmd->entry);
+ kmem_cache_free(desc->pt->dma_cmd_cache, cmd);
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *__pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ if (list_empty(&chan->vc.desc_submitted))
+ return NULL;
+
+ vd = list_empty(&chan->vc.desc_issued) ?
+ list_first_entry(&chan->vc.desc_submitted,
+ struct virt_dma_desc, node) : NULL;
+
+ vchan_issue_pending(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ /* Remove the DMA command from the list and free it */
+ pt_free_active_cmd(desc);
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ /* Error, free remaining commands and move on */
+ pt_free_cmd_resources(desc->pt,
+ &desc->cmdlist);
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_issue_next_cmd(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_cmd *pt_alloc_dma_cmd(struct pt_dma_chan *chan)
+{
+ struct pt_dma_cmd *cmd;
+
+ cmd = kmem_cache_zalloc(chan->pt->dma_cmd_cache, GFP_NOWAIT);
+
+ return cmd;
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->issued_to_hw = 0;
+ INIT_LIST_HEAD(&desc->cmdlist);
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct pt_dma_desc *desc;
+ struct pt_dma_cmd *cmd;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ cmd = pt_alloc_dma_cmd(chan);
+ if (!cmd)
+ goto err;
+
+ pt_cmd = &cmd->pt_cmd;
+ pt_cmd->pt = chan->pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ list_add_tail(&cmd->entry, &desc->cmdlist);
+
+ desc->len = len;
+
+ if (list_empty(&desc->cmdlist))
+ goto err;
+
+ return desc;
+
+err:
+ pt_free_cmd_resources(pt, &desc->cmdlist);
+ kmem_cache_free(pt->dma_desc_cache, desc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = __pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_stop_queue(&chan->pt->cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_start_queue(&chan->pt->cmd_q);
+ desc = __pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct pt_dma_chan *chan;
+ struct dma_device *dma_dev = &pt->dma_dev;
+ char *cmd_cache_name;
+ char *desc_cache_name;
+ int ret;
+
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ pt->name);
+ if (!cmd_cache_name)
+ return -ENOMEM;
+
+ pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
+ sizeof(struct pt_dma_cmd),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_cmd_cache)
+ return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ pt->name);
+ if (!desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+
+ dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(pt->dma_cmd_cache);
+
+ return ret;
+}
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+ kmem_cache_destroy(pt->dma_cmd_cache);
+}
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index a89a74ac..bc6676d 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -14,6 +14,7 @@
#define __PT_DEV_H__

#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
@@ -21,6 +22,8 @@
#include <linux/wait.h>
#include <linux/dmapool.h>

+#include "../virt-dma.h"
+
#define MAX_PT_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32

@@ -173,6 +176,25 @@ struct pt_cmd {
void *data;
};

+struct pt_dma_cmd {
+ struct list_head entry;
+ struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_desc {
+ struct virt_dma_desc vd;
+ struct pt_device *pt;
+ struct list_head cmdlist;
+ enum dma_status status;
+ size_t len;
+ bool issued_to_hw;
+};
+
+struct pt_dma_chan {
+ struct virt_dma_chan vc;
+ struct pt_device *pt;
+};
+
struct pt_cmd_queue {
struct pt_device *pt;

@@ -241,6 +263,12 @@ struct pt_device {
*/
struct pt_cmd_queue cmd_q;

+ /* Support for the DMA Engine capabilities */
+ struct dma_device dma_dev;
+ struct pt_dma_chan *pt_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
wait_queue_head_t lsb_queue;

struct pt_tasklet_data tdata;
@@ -293,6 +321,9 @@ struct pt_dev_vdata {
const unsigned int bar;
};

+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
int pt_core_init(struct pt_device *pt);
void pt_core_destroy(struct pt_device *pt);

--
2.7.4


2021-06-08 19:39:12

by Vinod Koul

Subject: Re: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource

On 02-06-21, 12:22, Sanjay R Mehta wrote:

> +static void pt_do_cmd_complete(unsigned long data)
> +{
> + struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
> + struct pt_cmd *cmd = tdata->cmd;
> + struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
> + u32 tail;
> +
> + tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);

why extract tail in non-error case?

> +static int pt_issue_next_cmd(struct pt_dma_desc *desc)
> +{
> + struct pt_passthru_engine *pt_engine;
> + struct pt_dma_cmd *cmd;
> + struct pt_device *pt;
> + struct pt_cmd *pt_cmd;
> + struct pt_cmd_queue *cmd_q;
> +
> + cmd = list_first_entry(&desc->cmdlist, struct pt_dma_cmd, entry);
> + desc->issued_to_hw = 1;

Why do you need this, the descriptor should be in vc.desc_issued list

> +static void pt_free_active_cmd(struct pt_dma_desc *desc)
> +{
> + struct pt_dma_cmd *cmd = NULL;
> +
> + if (desc->issued_to_hw)
> + cmd = list_first_entry_or_null(&desc->cmdlist, struct pt_dma_cmd,
> + entry);

single line pls and use lists provided..


> +static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
> + struct pt_dma_desc *desc)
> +{
> + struct dma_async_tx_descriptor *tx_desc;
> + struct virt_dma_desc *vd;
> + unsigned long flags;
> +
> + /* Loop over descriptors until one is found with commands */
> + do {
> + if (desc) {
> + /* Remove the DMA command from the list and free it */
> + pt_free_active_cmd(desc);
> + if (!desc->issued_to_hw) {
> + /* No errors, keep going */
> + if (desc->status != DMA_ERROR)
> + return desc;
> + /* Error, free remaining commands and move on */
> + pt_free_cmd_resources(desc->pt,
> + &desc->cmdlist);

single line again

> +static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
> + unsigned long flags)
> +{
> + struct pt_dma_desc *desc;
> +
> + desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
> + if (!desc)
> + return NULL;
> +
> + vchan_tx_prep(&chan->vc, &desc->vd, flags);
> +
> + desc->pt = chan->pt;
> + desc->issued_to_hw = 0;
> + INIT_LIST_HEAD(&desc->cmdlist);

why do you need your own list, the lists in vc should suffice?

> +static int pt_resume(struct dma_chan *dma_chan)
> +{
> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
> + struct pt_dma_desc *desc = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&chan->vc.lock, flags);
> + pt_start_queue(&chan->pt->cmd_q);
> + desc = __pt_next_dma_desc(chan);
> + spin_unlock_irqrestore(&chan->vc.lock, flags);
> +
> + /* If there was something active, re-start */
> + if (desc)
> + pt_cmd_callback(desc, 0);

this doesn't sound correct. In pause you stop the queue, so start of the
queue should be done here... Why grab a descriptor?

> +static int pt_terminate_all(struct dma_chan *dma_chan)
> +{
> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
> +
> + vchan_free_chan_resources(&chan->vc);

what about the descriptors, are you not going to clear the lists and
free them..

> +int pt_dmaengine_register(struct pt_device *pt)
> +{
> + struct pt_dma_chan *chan;
> + struct dma_device *dma_dev = &pt->dma_dev;
> + char *cmd_cache_name;
> + char *desc_cache_name;
> + int ret;
> +
> + pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
> + GFP_KERNEL);
> + if (!pt->pt_dma_chan)
> + return -ENOMEM;
> +
> + cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
> + "%s-dmaengine-cmd-cache",
> + pt->name);
> + if (!cmd_cache_name)
> + return -ENOMEM;
> +
> + pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
> + sizeof(struct pt_dma_cmd),
> + sizeof(void *),
> + SLAB_HWCACHE_ALIGN, NULL);
> + if (!pt->dma_cmd_cache)
> + return -ENOMEM;
> +
> + desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
> + "%s-dmaengine-desc-cache",
> + pt->name);
> + if (!desc_cache_name) {
> + ret = -ENOMEM;
> + goto err_cache;
> + }
> +
> + pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
> + sizeof(struct pt_dma_desc),
> + sizeof(void *),

sizeof void ptr?

> + SLAB_HWCACHE_ALIGN, NULL);
> + if (!pt->dma_desc_cache) {
> + ret = -ENOMEM;
> + goto err_cache;
> + }
> +
> + dma_dev->dev = pt->dev;
> + dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> + dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> + dma_dev->directions = DMA_MEM_TO_MEM;
> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
> + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

Why DMA_PRIVATE? this is a dma memcpy controller ...
--
~Vinod

2021-06-15 11:35:16

by Sanjay R Mehta

Subject: Re: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource



On 6/9/2021 12:26 AM, Vinod Koul wrote:

[snipped]

>> +static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
>> + unsigned long flags)
>> +{
>> + struct pt_dma_desc *desc;
>> +
>> + desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
>> + if (!desc)
>> + return NULL;
>> +
>> + vchan_tx_prep(&chan->vc, &desc->vd, flags);
>> +
>> + desc->pt = chan->pt;
>> + desc->issued_to_hw = 0;
>> + INIT_LIST_HEAD(&desc->cmdlist);
>
> why do you need your own list, the lists in vc should suffice?
>

Do you think this should be a major blocker for pulling this series in 5.14?
Would you be okay to accept this change in the subsequent driver updates?

>> +static int pt_resume(struct dma_chan *dma_chan)
>> +{
>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>> + struct pt_dma_desc *desc = NULL;
>> + unsigned long flags;
>> +
>> + spin_lock_irqsave(&chan->vc.lock, flags);
>> + pt_start_queue(&chan->pt->cmd_q);
>> + desc = __pt_next_dma_desc(chan);
>> + spin_unlock_irqrestore(&chan->vc.lock, flags);
>> +
>> + /* If there was something active, re-start */
>> + if (desc)
>> + pt_cmd_callback(desc, 0);
>
> this doesn't sound correct. In pause you stop the queue, so start of the
> queue should be done here... Why grab a descriptor?
>
>> +static int pt_terminate_all(struct dma_chan *dma_chan)
>> +{
>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>> +
>> + vchan_free_chan_resources(&chan->vc);
>
> what about the descriptors, are you not going to clear the lists and
> free them..
>
>> +int pt_dmaengine_register(struct pt_device *pt)
>> +{
>> + struct pt_dma_chan *chan;
>> + struct dma_device *dma_dev = &pt->dma_dev;
>> + char *cmd_cache_name;
>> + char *desc_cache_name;
>> + int ret;
>> +
>> + pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
>> + GFP_KERNEL);
>> + if (!pt->pt_dma_chan)
>> + return -ENOMEM;
>> +
>> + cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>> + "%s-dmaengine-cmd-cache",
>> + pt->name);
>> + if (!cmd_cache_name)
>> + return -ENOMEM;
>> +
>> + pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
>> + sizeof(struct pt_dma_cmd),
>> + sizeof(void *),
>> + SLAB_HWCACHE_ALIGN, NULL);
>> + if (!pt->dma_cmd_cache)
>> + return -ENOMEM;
>> +
>> + desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>> + "%s-dmaengine-desc-cache",
>> + pt->name);
>> + if (!desc_cache_name) {
>> + ret = -ENOMEM;
>> + goto err_cache;
>> + }
>> +
>> + pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
>> + sizeof(struct pt_dma_desc),
>> + sizeof(void *),
>
> sizeof void ptr?
>
>> + SLAB_HWCACHE_ALIGN, NULL);
>> + if (!pt->dma_desc_cache) {
>> + ret = -ENOMEM;
>> + goto err_cache;
>> + }
>> +
>> + dma_dev->dev = pt->dev;
>> + dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>> + dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>> + dma_dev->directions = DMA_MEM_TO_MEM;
>> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
>> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
>> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
>> + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
>
> Why DMA_PRIVATE? this is a dma memcpy controller ...

This DMA controller is intended to be used with AMD Non-Transparent
Bridge devices and not for general purpose peripheral DMA. Hence marking
it as DMA_PRIVATE.

- Sanjay

2021-06-16 04:19:33

by Vinod Koul

Subject: Re: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource

On 15-06-21, 17:04, Sanjay R Mehta wrote:
>
>
> On 6/9/2021 12:26 AM, Vinod Koul wrote:
>
> [snipped]
>
> >> +static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
> >> + unsigned long flags)
> >> +{
> >> + struct pt_dma_desc *desc;
> >> +
> >> + desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
> >> + if (!desc)
> >> + return NULL;
> >> +
> >> + vchan_tx_prep(&chan->vc, &desc->vd, flags);
> >> +
> >> + desc->pt = chan->pt;
> >> + desc->issued_to_hw = 0;
> >> + INIT_LIST_HEAD(&desc->cmdlist);
> >
> > why do you need your own list, the lists in vc should suffice?
> >
>
> Do you think this should be a major blocker for pulling this series in 5.14?
> Would you be okay to accept this change in the subsequent driver updates?

Sorry, that is not how upstream works; I would like things to be better
before we merge this.

>
> >> +static int pt_resume(struct dma_chan *dma_chan)
> >> +{
> >> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
> >> + struct pt_dma_desc *desc = NULL;
> >> + unsigned long flags;
> >> +
> >> + spin_lock_irqsave(&chan->vc.lock, flags);
> >> + pt_start_queue(&chan->pt->cmd_q);
> >> + desc = __pt_next_dma_desc(chan);
> >> + spin_unlock_irqrestore(&chan->vc.lock, flags);
> >> +
> >> + /* If there was something active, re-start */
> >> + if (desc)
> >> + pt_cmd_callback(desc, 0);
> >
> > this doesn't sound correct. In pause you stop the queue, so start of the
> > queue should be done here... Why grab a descriptor?
> >
> >> +static int pt_terminate_all(struct dma_chan *dma_chan)
> >> +{
> >> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
> >> +
> >> + vchan_free_chan_resources(&chan->vc);
> >
> > what about the descriptors, are you not going to clear the lists and
> > free them..
> >
> >> +int pt_dmaengine_register(struct pt_device *pt)
> >> +{
> >> + struct pt_dma_chan *chan;
> >> + struct dma_device *dma_dev = &pt->dma_dev;
> >> + char *cmd_cache_name;
> >> + char *desc_cache_name;
> >> + int ret;
> >> +
> >> + pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
> >> + GFP_KERNEL);
> >> + if (!pt->pt_dma_chan)
> >> + return -ENOMEM;
> >> +
> >> + cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
> >> + "%s-dmaengine-cmd-cache",
> >> + pt->name);
> >> + if (!cmd_cache_name)
> >> + return -ENOMEM;
> >> +
> >> + pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
> >> + sizeof(struct pt_dma_cmd),
> >> + sizeof(void *),
> >> + SLAB_HWCACHE_ALIGN, NULL);
> >> + if (!pt->dma_cmd_cache)
> >> + return -ENOMEM;
> >> +
> >> + desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
> >> + "%s-dmaengine-desc-cache",
> >> + pt->name);
> >> + if (!desc_cache_name) {
> >> + ret = -ENOMEM;
> >> + goto err_cache;
> >> + }
> >> +
> >> + pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
> >> + sizeof(struct pt_dma_desc),
> >> + sizeof(void *),
> >
> > sizeof void ptr?

This and many more comments are left unanswered; do you agree with them,
do you disagree? Hard to tell from silence..

> >
> >> + SLAB_HWCACHE_ALIGN, NULL);
> >> + if (!pt->dma_desc_cache) {
> >> + ret = -ENOMEM;
> >> + goto err_cache;
> >> + }
> >> +
> >> + dma_dev->dev = pt->dev;
> >> + dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> >> + dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
> >> + dma_dev->directions = DMA_MEM_TO_MEM;
> >> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> >> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
> >> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
> >> + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
> >
> > Why DMA_PRIVATE? this is a dma memcpy controller ...
>
> This DMA controller is intended to be used with AMD Non-Transparent
> Bridge devices and not for general purpose peripheral DMA. Hence marking
> it as DMA_PRIVATE.

Okay, maybe add a comment so that people would know

--
~Vinod

2021-06-16 05:24:37

by Sanjay R Mehta

Subject: Re: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource



On 6/16/2021 9:48 AM, Vinod Koul wrote:
> On 15-06-21, 17:04, Sanjay R Mehta wrote:
>>
>>
>> On 6/9/2021 12:26 AM, Vinod Koul wrote:
>>
>> [snipped]
>>
>>>> +static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
>>>> + unsigned long flags)
>>>> +{
>>>> + struct pt_dma_desc *desc;
>>>> +
>>>> + desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
>>>> + if (!desc)
>>>> + return NULL;
>>>> +
>>>> + vchan_tx_prep(&chan->vc, &desc->vd, flags);
>>>> +
>>>> + desc->pt = chan->pt;
>>>> + desc->issued_to_hw = 0;
>>>> + INIT_LIST_HEAD(&desc->cmdlist);
>>>
>>> why do you need your own list, the lists in vc should suffice?
>>>
>>
>> Do you think this should be a major blocker for pulling this series in 5.14?
>> Would you be okay to accept this change in the subsequent driver updates?
>
> Sorry, that is not how upstream works; I would like things to be better
> before we merge this.
>

Sure Vinod, I will fix this and send the change in the next version.

>>
>>>> +static int pt_resume(struct dma_chan *dma_chan)
>>>> +{
>>>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>>>> + struct pt_dma_desc *desc = NULL;
>>>> + unsigned long flags;
>>>> +
>>>> + spin_lock_irqsave(&chan->vc.lock, flags);
>>>> + pt_start_queue(&chan->pt->cmd_q);
>>>> + desc = __pt_next_dma_desc(chan);
>>>> + spin_unlock_irqrestore(&chan->vc.lock, flags);
>>>> +
>>>> + /* If there was something active, re-start */
>>>> + if (desc)
>>>> + pt_cmd_callback(desc, 0);
>>>
>>> this doesn't sound correct. In pause you stop the queue, so start of the
>>> queue should be done here... Why grab a descriptor?
>>>
>>>> +static int pt_terminate_all(struct dma_chan *dma_chan)
>>>> +{
>>>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>>>> +
>>>> + vchan_free_chan_resources(&chan->vc);
>>>
>>> what about the descriptors, are you not going to clear the lists and
>>> free them..
>>>
>>>> +int pt_dmaengine_register(struct pt_device *pt)
>>>> +{
>>>> + struct pt_dma_chan *chan;
>>>> + struct dma_device *dma_dev = &pt->dma_dev;
>>>> + char *cmd_cache_name;
>>>> + char *desc_cache_name;
>>>> + int ret;
>>>> +
>>>> + pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
>>>> + GFP_KERNEL);
>>>> + if (!pt->pt_dma_chan)
>>>> + return -ENOMEM;
>>>> +
>>>> + cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>>>> + "%s-dmaengine-cmd-cache",
>>>> + pt->name);
>>>> + if (!cmd_cache_name)
>>>> + return -ENOMEM;
>>>> +
>>>> + pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
>>>> + sizeof(struct pt_dma_cmd),
>>>> + sizeof(void *),
>>>> + SLAB_HWCACHE_ALIGN, NULL);
>>>> + if (!pt->dma_cmd_cache)
>>>> + return -ENOMEM;
>>>> +
>>>> + desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>>>> + "%s-dmaengine-desc-cache",
>>>> + pt->name);
>>>> + if (!desc_cache_name) {
>>>> + ret = -ENOMEM;
>>>> + goto err_cache;
>>>> + }
>>>> +
>>>> + pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
>>>> + sizeof(struct pt_dma_desc),
>>>> + sizeof(void *),
>>>
>>> sizeof void ptr?
>
> This and many more comments are left unanswered; do you agree with them,
> do you disagree? Hard to tell from silence..
>

Yes, I agree with all the other comments and will send the changes in
the next version of the patch series.

>>>
>>>> + SLAB_HWCACHE_ALIGN, NULL);
>>>> + if (!pt->dma_desc_cache) {
>>>> + ret = -ENOMEM;
>>>> + goto err_cache;
>>>> + }
>>>> +
>>>> + dma_dev->dev = pt->dev;
>>>> + dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>>>> + dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>>>> + dma_dev->directions = DMA_MEM_TO_MEM;
>>>> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
>>>> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
>>>> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
>>>> + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
>>>
>>> Why DMA_PRIVATE? this is a dma memcpy controller ...
>>
>> This DMA controller is intended to be used with AMD Non-Transparent
>> Bridge devices and not for general purpose peripheral DMA. Hence marking
>> it as DMA_PRIVATE.
>
> Okay, maybe add a comment so that people would know
>

Sure. I will add a comment here.
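For instance, something along these lines (wording just a sketch):

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);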

> --
> ~Vinod
>

2021-06-20 03:54:59

by Sanjay R Mehta

Subject: Re: [PATCH v9 2/3] dmaengine: ptdma: register PTDMA controller as a DMA resource

Hi Vinod,

I have addressed all of your feedback and will send the update in the
next version of this series.

On 6/9/2021 12:26 AM, Vinod Koul wrote:
> On 02-06-21, 12:22, Sanjay R Mehta wrote:
>
>> +static void pt_do_cmd_complete(unsigned long data)
>> +{
>> + struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
>> + struct pt_cmd *cmd = tdata->cmd;
>> + struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
>> + u32 tail;
>> +
>> + tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
>
> why extract tail in non-error case?
>
You are right, it is unnecessary in the non-error case; I will correct
it in the next version.
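Roughly like this, computing the tail only on the error/flush path
(untested sketch):

	static void pt_do_cmd_complete(unsigned long data)
	{
		struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
		struct pt_cmd *cmd = tdata->cmd;
		struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
		u32 tail;

		if (cmd_q->cmd_error) {
			/*
			 * Log the error and flush the queue by
			 * moving the head pointer
			 */
			tail = lower_32_bits(cmd_q->qdma_tail +
					     cmd_q->qidx * Q_DESC_SIZE);
			pt_log_error(cmd_q->pt, cmd_q->cmd_error);
			iowrite32(tail, cmd_q->reg_head_lo);
		}

		cmd->pt_cmd_callback(cmd->data, cmd->ret);
	}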

>> +static int pt_issue_next_cmd(struct pt_dma_desc *desc)
>> +{
>> + struct pt_passthru_engine *pt_engine;
>> + struct pt_dma_cmd *cmd;
>> + struct pt_device *pt;
>> + struct pt_cmd *pt_cmd;
>> + struct pt_cmd_queue *cmd_q;
>> +
>> + cmd = list_first_entry(&desc->cmdlist, struct pt_dma_cmd, entry);
>> + desc->issued_to_hw = 1;
>
> Why do you need this, the descriptor should be in vc.desc_issued list
>
Agreed. I have removed the cmdlist dependency everywhere in the code
and used vc.desc_issued instead.
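The command is now issued straight from the descriptor, roughly like
this (untested sketch; it assumes pt_dma_desc gains an embedded
struct pt_cmd member named pt_cmd in place of the cmdlist):

	static int pt_dma_start_desc(struct pt_dma_desc *desc)
	{
		struct pt_passthru_engine *pt_engine;
		struct pt_device *pt;
		struct pt_cmd *pt_cmd;
		struct pt_cmd_queue *cmd_q;

		desc->issued_to_hw = 1;

		/* pt_cmd is embedded in the descriptor, no list walk */
		pt_cmd = &desc->pt_cmd;
		pt = pt_cmd->pt;
		cmd_q = &pt->cmd_q;
		pt_engine = &pt_cmd->passthru;

		pt->tdata.cmd = pt_cmd;

		/* Execute the command */
		pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

		return 0;
	}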

>> +static void pt_free_active_cmd(struct pt_dma_desc *desc)
>> +{
>> + struct pt_dma_cmd *cmd = NULL;
>> +
>> + if (desc->issued_to_hw)
>> + cmd = list_first_entry_or_null(&desc->cmdlist, struct pt_dma_cmd,
>> + entry);
>
> single line pls and use lists provided..
>
>
>> +static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
>> + struct pt_dma_desc *desc)
>> +{
>> + struct dma_async_tx_descriptor *tx_desc;
>> + struct virt_dma_desc *vd;
>> + unsigned long flags;
>> +
>> + /* Loop over descriptors until one is found with commands */
>> + do {
>> + if (desc) {
>> + /* Remove the DMA command from the list and free it */
>> + pt_free_active_cmd(desc);
>> + if (!desc->issued_to_hw) {
>> + /* No errors, keep going */
>> + if (desc->status != DMA_ERROR)
>> + return desc;
>> + /* Error, free remaining commands and move on */
>> + pt_free_cmd_resources(desc->pt,
>> + &desc->cmdlist);
>
> single line again
>
Sure. I will address this and send it in the next version.
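For reference, the joined lines would simply be:

	cmd = list_first_entry_or_null(&desc->cmdlist, struct pt_dma_cmd, entry);

and

	pt_free_cmd_resources(desc->pt, &desc->cmdlist);

(both go away anyway once the private cmdlist is dropped).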

>> +static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
>> + unsigned long flags)
>> +{
>> + struct pt_dma_desc *desc;
>> +
>> + desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
>> + if (!desc)
>> + return NULL;
>> +
>> + vchan_tx_prep(&chan->vc, &desc->vd, flags);
>> +
>> + desc->pt = chan->pt;
>> + desc->issued_to_hw = 0;
>> + INIT_LIST_HEAD(&desc->cmdlist);
>
> why do you need your own list, the lists in vc should suffice?
>
Agreed. I have removed the dependency on this list everywhere in the code.

>> +static int pt_resume(struct dma_chan *dma_chan)
>> +{
>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>> + struct pt_dma_desc *desc = NULL;
>> + unsigned long flags;
>> +
>> + spin_lock_irqsave(&chan->vc.lock, flags);
>> + pt_start_queue(&chan->pt->cmd_q);
>> + desc = __pt_next_dma_desc(chan);
>> + spin_unlock_irqrestore(&chan->vc.lock, flags);
>> +
>> + /* If there was something active, re-start */
>> + if (desc)
>> + pt_cmd_callback(desc, 0);
>
> this doesn't sound correct. In pause you stop the queue, so start of the
> queue should be done here... Why grab a descriptor?
>
The queue start is already done here.

If there was an active descriptor when the pause occurred, it needs to
be restarted during resume. Hence we grab that descriptor and restart
the transaction.

>> +static int pt_terminate_all(struct dma_chan *dma_chan)
>> +{
>> + struct pt_dma_chan *chan = to_pt_chan(dma_chan);
>> +
>> + vchan_free_chan_resources(&chan->vc);
>
> what about the descriptors, are you not going to clear the lists and
> free them..
>
Agreed. I will address this by clearing the lists and freeing the
descriptors, and will send the update in the next version.
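Roughly like this, using the virt-dma helpers (untested sketch):

	static int pt_terminate_all(struct dma_chan *dma_chan)
	{
		struct pt_dma_chan *chan = to_pt_chan(dma_chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&chan->vc.lock, flags);
		vchan_get_all_descriptors(&chan->vc, &head);
		spin_unlock_irqrestore(&chan->vc.lock, flags);

		/* Free everything collected from the vc lists */
		vchan_dma_desc_free_list(&chan->vc, &head);
		vchan_free_chan_resources(&chan->vc);

		return 0;
	}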

>> +int pt_dmaengine_register(struct pt_device *pt)
>> +{
>> + struct pt_dma_chan *chan;
>> + struct dma_device *dma_dev = &pt->dma_dev;
>> + char *cmd_cache_name;
>> + char *desc_cache_name;
>> + int ret;
>> +
>> + pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
>> + GFP_KERNEL);
>> + if (!pt->pt_dma_chan)
>> + return -ENOMEM;
>> +
>> + cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>> + "%s-dmaengine-cmd-cache",
>> + pt->name);
>> + if (!cmd_cache_name)
>> + return -ENOMEM;
>> +
>> + pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
>> + sizeof(struct pt_dma_cmd),
>> + sizeof(void *),
>> + SLAB_HWCACHE_ALIGN, NULL);
>> + if (!pt->dma_cmd_cache)
>> + return -ENOMEM;
>> +
>> + desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
>> + "%s-dmaengine-desc-cache",
>> + pt->name);
>> + if (!desc_cache_name) {
>> + ret = -ENOMEM;
>> + goto err_cache;
>> + }
>> +
>> + pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
>> + sizeof(struct pt_dma_desc),
>> + sizeof(void *),
>
> sizeof void ptr?
>
Sure. I will address this and send it in the next version.
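i.e. pass 0 for the align argument and let SLAB_HWCACHE_ALIGN take
care of it (sketch):

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);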

>> + SLAB_HWCACHE_ALIGN, NULL);
>> + if (!pt->dma_desc_cache) {
>> + ret = -ENOMEM;
>> + goto err_cache;
>> + }
>> +
>> + dma_dev->dev = pt->dev;
>> + dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>> + dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
>> + dma_dev->directions = DMA_MEM_TO_MEM;
>> + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
>> + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
>> + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
>> + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
>
> Why DMA_PRIVATE? this is a dma memcpy controller ...
> --
> ~Vinod
>