This series adds a new dmaengine api, dma_get_channel_caps(), which
may be used by a driver to get channel-specific capabilities. This is
based on a starting point suggested by Vinod Koul, but only implements
the minimal sets of channel capabilities to fulfill the needs of the
EDMA DMA Engine driver at this time.
Specifically, this implementation supports reporting of a set of
transfer type operations, maximum number of SG segments, and the
maximum size of an SG segment that a channel can support.
The call is implemented as follows:
struct dmaengine_chan_caps
*dma_get_channel_caps(struct dma_chan *chan,
enum dma_transfer_direction dir);
The dma transfer direction parameter may appear a bit out of place
but it is necessary since the direction field in struct
dma_slave_config was deprecated. In some cases, EDMA for one, it
is necessary for the dmaengine driver to have the burst and address
width slave configuration parameters available in order to compute
the maximum segment size that can be handled. Due to this requirement,
the calling order of this api is as follows:
1. Allocate a DMA slave channel
1a. [Optionally] Get channel capabilities
2. Set slave and controller specific parameters
3. Get a descriptor for transaction
4. Submit the transaction
5. Issue pending requests and wait for callback notification
Along with the API implementation, this series implements the
backend device_channel_caps() in the EDMA DMA Engine driver and
converts the davinci_mmc driver to use dma_get_channel_caps() to
replace hardcoded limits.
This is tested on the AM1808-EVM.
Matt Porter (3):
dmaengine: add dma_get_channel_caps()
dma: edma: add device_channel_caps() support
mmc: davinci: get SG segment limits with dma_get_channel_caps()
drivers/dma/edma.c | 26 ++++++++++++
drivers/mmc/host/davinci_mmc.c | 66 +++++++++--------------------
include/linux/dmaengine.h | 52 +++++++++++++++++++++++
include/linux/platform_data/mmc-davinci.h | 3 --
4 files changed, 99 insertions(+), 48 deletions(-)
--
1.7.9.5
Replace the hardcoded values used to set max_segs/max_seg_size with
a dma_get_channel_caps() query to the dmaengine driver.
Signed-off-by: Matt Porter <[email protected]>
---
drivers/mmc/host/davinci_mmc.c | 66 +++++++++--------------------
include/linux/platform_data/mmc-davinci.h | 3 --
2 files changed, 21 insertions(+), 48 deletions(-)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index f5d46ea..d1efacc 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -145,18 +145,6 @@
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
-/*
- * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
- * for drivers with max_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. nr_sg (passed from
- * platform data) == 16 gives at least the same throughput boost, using
- * EDMA transfer linkage instead of spending CPU time copying pages.
- */
-#define MAX_CCNT ((1 << 16) - 1)
-
-#define MAX_NR_SG 16
-
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
@@ -217,8 +205,6 @@ struct mmc_davinci_host {
u8 version;
/* for ns in one cycle calculation */
unsigned ns_in_one_cycle;
- /* Number of sg segments */
- u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -422,16 +408,7 @@ static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
int ret = 0;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
- struct dma_slave_config dma_tx_conf = {
- .direction = DMA_MEM_TO_DEV,
- .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .dst_maxburst =
- rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
- };
chan = host->dma_tx;
- dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
-
desc = dmaengine_prep_slave_sg(host->dma_tx,
data->sg,
host->sg_len,
@@ -444,16 +421,7 @@ static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
goto out;
}
} else {
- struct dma_slave_config dma_rx_conf = {
- .direction = DMA_DEV_TO_MEM,
- .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .src_maxburst =
- rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
- };
chan = host->dma_rx;
- dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
-
desc = dmaengine_prep_slave_sg(host->dma_rx,
data->sg,
host->sg_len,
@@ -1166,6 +1134,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
struct resource *r, *mem = NULL;
int ret = 0, irq = 0;
size_t mem_size;
+ struct dmaengine_chan_caps *dma_chan_caps;
/* REVISIT: when we're fully converted, fail if pdata is NULL */
@@ -1215,12 +1184,6 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
init_mmcsd_host(host);
- if (pdata->nr_sg)
- host->nr_sg = pdata->nr_sg - 1;
-
- if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
- host->nr_sg = MAX_NR_SG;
-
host->use_dma = use_dma;
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq(pdev, 1);
@@ -1249,14 +1212,27 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
mmc->caps |= pdata->caps;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- /* With no iommu coalescing pages, each phys_seg is a hw_seg.
- * Each hw_seg uses one EDMA parameter RAM slot, always one
- * channel and then usually some linked slots.
- */
- mmc->max_segs = MAX_NR_SG;
+ {
+ struct dma_slave_config dma_txrx_conf = {
+ .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ dmaengine_slave_config(host->dma_tx, &dma_txrx_conf);
+ dmaengine_slave_config(host->dma_rx, &dma_txrx_conf);
+ }
- /* EDMA limit per hw segment (one or two MBytes) */
- mmc->max_seg_size = MAX_CCNT * rw_threshold;
+ /* Just check one channel for the DMA SG limits */
+ dma_chan_caps = dma_get_channel_caps(host->dma_tx, DMA_MEM_TO_DEV);
+ if (dma_chan_caps) {
+ mmc->max_segs = dma_chan_caps->seg_nr;
+ mmc->max_seg_size = dma_chan_caps->seg_len;
+ }
/* MMC/SD controller limits for multiblock requests */
mmc->max_blk_size = 4095; /* BLEN is 12 bits */
diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h
index 5ba6b22..6910209 100644
--- a/include/linux/platform_data/mmc-davinci.h
+++ b/include/linux/platform_data/mmc-davinci.h
@@ -25,9 +25,6 @@ struct davinci_mmc_config {
/* Version of the MMC/SD controller */
u8 version;
-
- /* Number of sg segments */
- u8 nr_sg;
};
void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
--
1.7.9.5
Implement device_channel_caps().
EDMA has a finite set of PaRAM slots available for linking
a multi-segment SG transfer. In order to prevent any one
channel from consuming all PaRAM slots to fulfill a large SG
transfer, the driver reports a static per-channel max number
of SG segments it will handle.
The maximum size of SG segment is limited by the slave config
maxburst and addr_width for the channel in question. These values
are used from the current channel config to calculate and return
the max segment length cap.
Signed-off-by: Matt Porter <[email protected]>
---
drivers/dma/edma.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 47ba7bf..8b41045 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -70,6 +70,7 @@ struct edma_chan {
bool alloced;
int slot[EDMA_MAX_SLOTS];
struct dma_slave_config cfg;
+ struct dmaengine_chan_caps caps;
};
struct edma_cc {
@@ -462,6 +463,28 @@ static void edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
+static struct dmaengine_chan_caps
+*edma_get_channel_caps(struct dma_chan *chan, enum dma_transfer_direction dir)
+{
+ struct edma_chan *echan;
+ enum dma_slave_buswidth width = 0;
+ u32 burst = 0;
+
+ if (chan) {
+ echan = to_edma_chan(chan);
+ if (dir == DMA_MEM_TO_DEV) {
+ width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ }
+ echan->caps.seg_len = (SZ_64K - 1) * width * burst;
+ return &echan->caps;
+ }
+ return NULL;
+}
+
static size_t edma_desc_size(struct edma_desc *edesc)
{
int i;
@@ -521,6 +544,8 @@ static void __init edma_chan_init(struct edma_cc *ecc,
echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
echan->ecc = ecc;
echan->vchan.desc_free = edma_desc_free;
+ echan->caps.ops = DMAENGINE_SLAVE | DMAENGINE_SG;
+ echan->caps.seg_nr = MAX_NR_SG;
vchan_init(&echan->vchan, dma);
@@ -537,6 +562,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
+ dma->device_channel_caps = edma_get_channel_caps;
dma->device_tx_status = edma_tx_status;
dma->device_control = edma_control;
dma->dev = dev;
--
1.7.9.5
Add a dmaengine API to retrieve per channel capabilities.
Currently, only channel ops and SG segment limitations are
implemented as caps.
The API is optionally implemented by drivers and when
unimplemented will return a NULL pointer. It is intended
to be executed after a channel has been requested and, if
the channel is intended to be used with slave SG transfers,
then it may only be called after dmaengine_slave_config()
has executed. The slave driver provides parameters such as
burst size and address width which may be necessary for
the dmaengine driver to use in order to properly return SG
segment limit caps.
Suggested-by: Vinod Koul <[email protected]>
Signed-off-by: Matt Porter <[email protected]>
---
include/linux/dmaengine.h | 52 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 11d9e25..0181887 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -371,6 +371,38 @@ struct dma_slave_config {
unsigned int slave_id;
};
+enum dmaengine_apis {
+ DMAENGINE_MEMCPY = 0x0001,
+ DMAENGINE_XOR = 0x0002,
+ DMAENGINE_XOR_VAL = 0x0004,
+ DMAENGINE_PQ = 0x0008,
+ DMAENGINE_PQ_VAL = 0x0010,
+ DMAENGINE_MEMSET = 0x0020,
+ DMAENGINE_SLAVE = 0x0040,
+ DMAENGINE_CYCLIC = 0x0080,
+ DMAENGINE_INTERLEAVED = 0x0100,
+ DMAENGINE_SG = 0x0200,
+};
+
+/* struct dmaengine_chan_caps - expose capability of a channel
+ * Note: each channel can have same or different capabilities
+ *
+ * This primarily classifies capabilities into
+ * a) APIs/ops supported
+ * b) channel physical capabilities
+ *
+ * @ops: or'ed api capability
+ * @seg_nr: maximum number of SG segments supported on a SG/SLAVE
+ * channel (0 for no maximum or not a SG/SLAVE channel)
+ * @seg_len: maximum length of SG segments supported on a SG/SLAVE
+ * channel (0 for no maximum or not a SG/SLAVE channel)
+ */
+struct dmaengine_chan_caps {
+ enum dmaengine_apis ops;
+ int seg_nr;
+ int seg_len;
+};
+
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
@@ -534,6 +566,7 @@ struct dma_tx_state {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
+ * @device_channel_caps: return the channel capabilities
*/
struct dma_device {
@@ -602,6 +635,8 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ struct dmaengine_chan_caps *(*device_channel_caps)(
+ struct dma_chan *chan, enum dma_transfer_direction direction);
};
static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -969,6 +1004,23 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
}
}
+/**
+ * dma_get_channel_caps - retrieve the capabilities of a DMA channel
+ * @chan: target DMA channel
+ * @dir: direction of transfer
+ *
+ * Get the channel-specific capabilities. If the dmaengine
+ * driver does not implement per channel capabilities then
+ * NULL is returned.
+ */
+static inline struct dmaengine_chan_caps
+*dma_get_channel_caps(struct dma_chan *chan, enum dma_transfer_direction dir)
+{
+ if (chan->device->device_channel_caps)
+ return chan->device->device_channel_caps(chan, dir);
+ return NULL;
+}
+
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
--
1.7.9.5
On Fri, Oct 19, 2012 at 3:51 AM, Matt Porter <[email protected]> wrote:
> Add a dmaengine API to retrieve per channel capabilities.
> Currently, only channel ops and SG segment limitations are
> implemented caps.
>
> The API is optionally implemented by drivers and when
> unimplemented will return a NULL pointer. It is intended
> to be executed after a channel has been requested and, if
> the channel is intended to be used with slave SG transfers,
> then it may only be called after dmaengine_slave_config()
> has executed. The slave driver provides parameters such as
> burst size and address width which may be necessary for
> the dmaengine driver to use in order to properly return SG
> segment limit caps.
>
> Suggested-by: Vinod Koul <[email protected]>
> Signed-off-by: Matt Porter <[email protected]>
Looks okay to me. Minor comment below...
> +/**
> + * dma_get_channel_caps - flush pending transactions to HW
> + * @chan: target DMA channel
> + * @dir: direction of transfer
> + *
> + * Get the channel-specific capabilities. If the dmaengine
> + * driver does not implement per channel capbilities then
> + * NULL is returned.
> + */
> +static inline struct dmaengine_chan_caps
> +*dma_get_channel_caps(struct dma_chan *chan, enum dma_transfer_direction dir)
Looks to me like the returned pointer should be a const. Ditto for the
callback prototype in the dma_device structure.
> +{
> + if (chan->device->device_channel_caps)
> + return chan->device->device_channel_caps(chan, dir);
> + return NULL;
> +}
> +
> enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
> #ifdef CONFIG_DMA_ENGINE
> enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
> --
> 1.7.9.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Grant Likely, B.Sc., P.Eng.
Secret Lab Technologies Ltd.
On Fri, Oct 19, 2012 at 3:51 AM, Matt Porter <[email protected]> wrote:
> Replace the hardcoded values used to set max_segs/max_seg_size with
> a dma_get_channel_caps() query to the dmaengine driver.
>
> Signed-off-by: Matt Porter <[email protected]>
Series looks reasonable to me.
Reviewed-by: Grant Likely <[email protected]>
> ---
> drivers/mmc/host/davinci_mmc.c | 66 +++++++++--------------------
> include/linux/platform_data/mmc-davinci.h | 3 --
> 2 files changed, 21 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
> index f5d46ea..d1efacc 100644
> --- a/drivers/mmc/host/davinci_mmc.c
> +++ b/drivers/mmc/host/davinci_mmc.c
> @@ -145,18 +145,6 @@
> /* MMCSD Init clock in Hz in opendrain mode */
> #define MMCSD_INIT_CLOCK 200000
>
> -/*
> - * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
> - * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
> - * for drivers with max_segs == 1, making the segments bigger (64KB)
> - * than the page or two that's otherwise typical. nr_sg (passed from
> - * platform data) == 16 gives at least the same throughput boost, using
> - * EDMA transfer linkage instead of spending CPU time copying pages.
> - */
> -#define MAX_CCNT ((1 << 16) - 1)
> -
> -#define MAX_NR_SG 16
> -
> static unsigned rw_threshold = 32;
> module_param(rw_threshold, uint, S_IRUGO);
> MODULE_PARM_DESC(rw_threshold,
> @@ -217,8 +205,6 @@ struct mmc_davinci_host {
> u8 version;
> /* for ns in one cycle calculation */
> unsigned ns_in_one_cycle;
> - /* Number of sg segments */
> - u8 nr_sg;
> #ifdef CONFIG_CPU_FREQ
> struct notifier_block freq_transition;
> #endif
> @@ -422,16 +408,7 @@ static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
> int ret = 0;
>
> if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
> - struct dma_slave_config dma_tx_conf = {
> - .direction = DMA_MEM_TO_DEV,
> - .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
> - .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
> - .dst_maxburst =
> - rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
> - };
> chan = host->dma_tx;
> - dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
> -
> desc = dmaengine_prep_slave_sg(host->dma_tx,
> data->sg,
> host->sg_len,
> @@ -444,16 +421,7 @@ static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
> goto out;
> }
> } else {
> - struct dma_slave_config dma_rx_conf = {
> - .direction = DMA_DEV_TO_MEM,
> - .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
> - .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
> - .src_maxburst =
> - rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
> - };
> chan = host->dma_rx;
> - dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
> -
> desc = dmaengine_prep_slave_sg(host->dma_rx,
> data->sg,
> host->sg_len,
> @@ -1166,6 +1134,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
> struct resource *r, *mem = NULL;
> int ret = 0, irq = 0;
> size_t mem_size;
> + struct dmaengine_chan_caps *dma_chan_caps;
>
> /* REVISIT: when we're fully converted, fail if pdata is NULL */
>
> @@ -1215,12 +1184,6 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
>
> init_mmcsd_host(host);
>
> - if (pdata->nr_sg)
> - host->nr_sg = pdata->nr_sg - 1;
> -
> - if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
> - host->nr_sg = MAX_NR_SG;
> -
> host->use_dma = use_dma;
> host->mmc_irq = irq;
> host->sdio_irq = platform_get_irq(pdev, 1);
> @@ -1249,14 +1212,27 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
> mmc->caps |= pdata->caps;
> mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
>
> - /* With no iommu coalescing pages, each phys_seg is a hw_seg.
> - * Each hw_seg uses one EDMA parameter RAM slot, always one
> - * channel and then usually some linked slots.
> - */
> - mmc->max_segs = MAX_NR_SG;
> + {
> + struct dma_slave_config dma_txrx_conf = {
> + .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
> + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
> + .src_maxburst =
> + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
> + .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
> + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
> + .dst_maxburst =
> + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
> + };
> + dmaengine_slave_config(host->dma_tx, &dma_txrx_conf);
> + dmaengine_slave_config(host->dma_rx, &dma_txrx_conf);
> + }
>
> - /* EDMA limit per hw segment (one or two MBytes) */
> - mmc->max_seg_size = MAX_CCNT * rw_threshold;
> + /* Just check one channel for the DMA SG limits */
> + dma_chan_caps = dma_get_channel_caps(host->dma_tx, DMA_MEM_TO_DEV);
> + if (dma_chan_caps) {
> + mmc->max_segs = dma_chan_caps->seg_nr;
> + mmc->max_seg_size = dma_chan_caps->seg_len;
> + }
>
> /* MMC/SD controller limits for multiblock requests */
> mmc->max_blk_size = 4095; /* BLEN is 12 bits */
> diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h
> index 5ba6b22..6910209 100644
> --- a/include/linux/platform_data/mmc-davinci.h
> +++ b/include/linux/platform_data/mmc-davinci.h
> @@ -25,9 +25,6 @@ struct davinci_mmc_config {
>
> /* Version of the MMC/SD controller */
> u8 version;
> -
> - /* Number of sg segments */
> - u8 nr_sg;
> };
> void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
>
> --
> 1.7.9.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Grant Likely, B.Sc., P.Eng.
Secret Lab Technologies Ltd.
On Fri, Oct 19, 2012 at 3:51 AM, Matt Porter <[email protected]> wrote:
> Add a dmaengine API to retrieve per channel capabilities.
> Currently, only channel ops and SG segment limitations are
> implemented caps.
>
> The API is optionally implemented by drivers and when
> unimplemented will return a NULL pointer. It is intended
> to be executed after a channel has been requested and, if
> the channel is intended to be used with slave SG transfers,
> then it may only be called after dmaengine_slave_config()
> has executed. The slave driver provides parameters such as
> burst size and address width which may be necessary for
> the dmaengine driver to use in order to properly return SG
> segment limit caps.
>
> Suggested-by: Vinod Koul <[email protected]>
> Signed-off-by: Matt Porter <[email protected]>
> ---
> include/linux/dmaengine.h | 52 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 52 insertions(+)
>
> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
> index 11d9e25..0181887 100644
> --- a/include/linux/dmaengine.h
> +++ b/include/linux/dmaengine.h
> @@ -371,6 +371,38 @@ struct dma_slave_config {
> unsigned int slave_id;
> };
>
> +enum dmaengine_apis {
> + DMAENGINE_MEMCPY = 0x0001,
> + DMAENGINE_XOR = 0x0002,
> + DMAENGINE_XOR_VAL = 0x0004,
> + DMAENGINE_PQ = 0x0008,
> + DMAENGINE_PQ_VAL = 0x0010,
> + DMAENGINE_MEMSET = 0x0020,
> + DMAENGINE_SLAVE = 0x0040,
> + DMAENGINE_CYCLIC = 0x0080,
> + DMAENGINE_INTERLEAVED = 0x0100,
> + DMAENGINE_SG = 0x0200,
> +};
Actually, one more comment. Why the new enum? Why can't the
dma_transaction_type enum be used directly along with dma_cap_mask_t?
> +
> +/* struct dmaengine_chan_caps - expose capability of a channel
> + * Note: each channel can have same or different capabilities
> + *
> + * This primarily classifies capabilities into
> + * a) APIs/ops supported
> + * b) channel physical capabilities
> + *
> + * @ops: or'ed api capability
> + * @seg_nr: maximum number of SG segments supported on a SG/SLAVE
> + * channel (0 for no maximum or not a SG/SLAVE channel)
> + * @seg_len: maximum length of SG segments supported on a SG/SLAVE
> + * channel (0 for no maximum or not a SG/SLAVE channel)
> + */
> +struct dmaengine_chan_caps {
> + enum dmaengine_apis ops;
> + int seg_nr;
> + int seg_len;
> +};
> +
> static inline const char *dma_chan_name(struct dma_chan *chan)
> {
> return dev_name(&chan->dev->device);
> @@ -534,6 +566,7 @@ struct dma_tx_state {
> * struct with auxiliary transfer status information, otherwise the call
> * will just return a simple status code
> * @device_issue_pending: push pending transactions to hardware
> + * @device_channel_caps: return the channel capabilities
> */
> struct dma_device {
>
> @@ -602,6 +635,8 @@ struct dma_device {
> dma_cookie_t cookie,
> struct dma_tx_state *txstate);
> void (*device_issue_pending)(struct dma_chan *chan);
> + struct dmaengine_chan_caps *(*device_channel_caps)(
> + struct dma_chan *chan, enum dma_transfer_direction direction);
> };
>
> static inline int dmaengine_device_control(struct dma_chan *chan,
> @@ -969,6 +1004,23 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
> }
> }
>
> +/**
> + * dma_get_channel_caps - flush pending transactions to HW
> + * @chan: target DMA channel
> + * @dir: direction of transfer
> + *
> + * Get the channel-specific capabilities. If the dmaengine
> + * driver does not implement per channel capbilities then
> + * NULL is returned.
> + */
> +static inline struct dmaengine_chan_caps
> +*dma_get_channel_caps(struct dma_chan *chan, enum dma_transfer_direction dir)
> +{
> + if (chan->device->device_channel_caps)
> + return chan->device->device_channel_caps(chan, dir);
> + return NULL;
> +}
> +
> enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
> #ifdef CONFIG_DMA_ENGINE
> enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
> --
> 1.7.9.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Grant Likely, B.Sc., P.Eng.
Secret Lab Technologies Ltd.
On Thu, Oct 18, 2012 at 7:51 PM, Matt Porter <[email protected]> wrote:
> Add a dmaengine API to retrieve per channel capabilities.
> Currently, only channel ops and SG segment limitations are
> implemented caps.
>
> The API is optionally implemented by drivers and when
> unimplemented will return a NULL pointer. It is intended
> to be executed after a channel has been requested and, if
> the channel is intended to be used with slave SG transfers,
> then it may only be called after dmaengine_slave_config()
> has executed. The slave driver provides parameters such as
> burst size and address width which may be necessary for
> the dmaengine driver to use in order to properly return SG
> segment limit caps.
>
> Suggested-by: Vinod Koul <[email protected]>
> Signed-off-by: Matt Porter <[email protected]>
> ---
> include/linux/dmaengine.h | 52 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 52 insertions(+)
>
> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
> index 11d9e25..0181887 100644
> --- a/include/linux/dmaengine.h
> +++ b/include/linux/dmaengine.h
> @@ -371,6 +371,38 @@ struct dma_slave_config {
> unsigned int slave_id;
> };
>
> +enum dmaengine_apis {
> + DMAENGINE_MEMCPY = 0x0001,
> + DMAENGINE_XOR = 0x0002,
> + DMAENGINE_XOR_VAL = 0x0004,
> + DMAENGINE_PQ = 0x0008,
> + DMAENGINE_PQ_VAL = 0x0010,
> + DMAENGINE_MEMSET = 0x0020,
> + DMAENGINE_SLAVE = 0x0040,
> + DMAENGINE_CYCLIC = 0x0080,
> + DMAENGINE_INTERLEAVED = 0x0100,
> + DMAENGINE_SG = 0x0200,
> +};
> +
> +/* struct dmaengine_chan_caps - expose capability of a channel
> + * Note: each channel can have same or different capabilities
> + *
> + * This primarily classifies capabilities into
> + * a) APIs/ops supported
> + * b) channel physical capabilities
> + *
> + * @ops: or'ed api capability
> + * @seg_nr: maximum number of SG segments supported on a SG/SLAVE
> + * channel (0 for no maximum or not a SG/SLAVE channel)
> + * @seg_len: maximum length of SG segments supported on a SG/SLAVE
> + * channel (0 for no maximum or not a SG/SLAVE channel)
> + */
> +struct dmaengine_chan_caps {
> + enum dmaengine_apis ops;
> + int seg_nr;
> + int seg_len;
> +};
This makes sense for the potentially dynamic capability properties
that get set after configuration, but why do we need the operation
types here? They can be retrieved from the parent device.
--
Dan
On Tue, 2012-10-23 at 15:54 -0700, Dan Williams wrote:
> > +struct dmaengine_chan_caps {
> > + enum dmaengine_apis ops;
> > + int seg_nr;
> > + int seg_len;
> > +};
>
> This makes sense for the potentially dynamic capability properties
> that get set after configuration, but why do we need the operation
> types here? They can be retrieved from the parent device.
I was thinking that each channel can have different capabilities.
You can assign one channel for mempcy operations exclusively and some
others for slave usage exclusively.
I believe some h/w do have such assignment so would help in doing that.
--
Vinod Koul
Intel Corp.
On Tue, 2012-10-23 at 23:49 +0100, Grant Likely wrote:
> > +enum dmaengine_apis {
> > + DMAENGINE_MEMCPY = 0x0001,
> > + DMAENGINE_XOR = 0x0002,
> > + DMAENGINE_XOR_VAL = 0x0004,
> > + DMAENGINE_PQ = 0x0008,
> > + DMAENGINE_PQ_VAL = 0x0010,
> > + DMAENGINE_MEMSET = 0x0020,
> > + DMAENGINE_SLAVE = 0x0040,
> > + DMAENGINE_CYCLIC = 0x0080,
> > + DMAENGINE_INTERLEAVED = 0x0100,
> > + DMAENGINE_SG = 0x0200,
> > +};
>
> Actually, one more comment. Why the new enum? Why can't the
> dma_transaction_type enum be used directly along with dma_cap_mask_t?
Some of the capabilities above are not there in dma_cap_mask_t, like DMA_SG.
Also DMA_INTERRUPT and DMA_PRIVATE would not make much sense here.
BUT would help to keep things simpler if have one definition which
includes all.
--
Vinod Koul
Intel Corp.