Add DMA support for Vybrid.
Signed-off-by: Sanchayan Maity <[email protected]>
---
Changes since v2:
1. Rebase on top of Shawn's latest for-next branch
2. Make DMA mode the default for Vybrid. We no longer use the EOQ mode.
Since devtype_data has been constified, it no longer makes sense to
change the trans_mode at run time.
Tested on Toradex Colibri Vybrid VF61 module using spidev and MCP CAN.
v1 Patch:
https://patchwork.kernel.org/patch/9360583/
v2 Patch:
https://patchwork.kernel.org/patch/9361601/
Regards,
Sanchayan.
---
drivers/spi/spi-fsl-dspi.c | 301 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 300 insertions(+), 1 deletion(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd9..bc64700 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -40,6 +42,7 @@
#define TRAN_STATE_WORD_ODD_NUM 0x04
#define DSPI_FIFO_SIZE 4
+#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
@@ -71,6 +74,11 @@
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000
@@ -108,6 +116,8 @@
#define SPI_TCR_TCNT_MAX 0x10000
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
struct chip_data {
u32 mcr_val;
u32 ctar_val;
@@ -117,6 +127,7 @@ struct chip_data {
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
DSPI_TCFQ_MODE,
+ DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
@@ -125,7 +136,7 @@ struct fsl_dspi_devtype_data {
};
static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_EOQ_MODE,
+ .trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
};
@@ -139,6 +150,22 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+struct fsl_dspi_dma {
+ u32 curr_xfer_len;
+
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
+};
+
struct fsl_dspi {
struct spi_master *master;
struct platform_device *pdev;
@@ -165,6 +192,7 @@ struct fsl_dspi {
u32 waitflags;
u32 spi_tcnt;
+ struct fsl_dspi_dma *dma;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
@@ -176,6 +204,263 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
+static void dspi_tx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int rx_word;
+ int i, len;
+ u16 d;
+
+ rx_word = is_double_byte_mode(dspi);
+
+ len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+ for (i = 0; i < len; i++) {
+ d = dspi->dma->rx_dma_buf[i];
+ rx_word ? (*(u16 *)dspi->rx = d) :
+ (*(u8 *)dspi->rx = d);
+ dspi->rx += rx_word + 1;
+ }
+ }
+
+ complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int time_left;
+ int tx_word;
+ int i, len;
+ u16 val;
+
+ tx_word = is_double_byte_mode(dspi);
+
+ len = tx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+
+ for (i = 0; i < len - 1; i++) {
+ val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ dspi->dma->tx_dma_buf[i] =
+ SPI_PUSHR_TXDATA(val) | SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0) | SPI_PUSHR_CONT;
+ dspi->tx += tx_word + 1;
+ }
+
+ val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0);
+ dspi->tx += tx_word + 1;
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+ dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->tx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = dspi_tx_dma_callback;
+ dma->tx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+ dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->rx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->rx_desc->callback = dspi_rx_dma_callback;
+ dma->rx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&dspi->dma->cmd_rx_complete);
+ reinit_completion(&dspi->dma->cmd_tx_complete);
+
+ dma_async_issue_pending(dma->chan_rx);
+ dma_async_issue_pending(dma->chan_tx);
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA tx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA rx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int curr_remaining_bytes;
+ int bytes_per_buffer;
+ int tx_word;
+ int ret = 0;
+
+ tx_word = is_double_byte_mode(dspi);
+ curr_remaining_bytes = dspi->len;
+ while (curr_remaining_bytes) {
+ /* Check if current transfer fits the DMA buffer */
+ dma->curr_xfer_len = curr_remaining_bytes;
+ bytes_per_buffer = DSPI_DMA_BUFSIZE /
+ (DSPI_FIFO_SIZE / (tx_word ? 2 : 1));
+ if (curr_remaining_bytes > bytes_per_buffer)
+ dma->curr_xfer_len = bytes_per_buffer;
+
+ ret = dspi_next_xfer_dma_submit(dspi);
+ if (ret) {
+ dev_err(dev, "DMA transfer failed\n");
+ goto exit;
+
+ } else {
+ curr_remaining_bytes -= dma->curr_xfer_len;
+ if (curr_remaining_bytes < 0)
+ curr_remaining_bytes = 0;
+ dspi->len = curr_remaining_bytes;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ struct fsl_dspi_dma *dma;
+ struct dma_slave_config cfg;
+ struct device *dev = &dspi->pdev->dev;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!dma->chan_rx) {
+ dev_err(dev, "rx dma channel not available\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ dma->chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!dma->chan_tx) {
+ dev_err(dev, "tx dma channel not available\n");
+ ret = -ENODEV;
+ goto err_tx_channel;
+ }
+
+ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->tx_dma_phys, GFP_KERNEL);
+ if (!dma->tx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_tx_dma_buf;
+ }
+
+ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->rx_dma_phys, GFP_KERNEL);
+ if (!dma->rx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_rx_dma_buf;
+ }
+
+ cfg.src_addr = phy_addr + SPI_POPR;
+ cfg.dst_addr = phy_addr + SPI_PUSHR;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure rx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure tx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ dspi->dma = dma;
+ init_completion(&dma->cmd_tx_complete);
+ init_completion(&dma->cmd_rx_complete);
+
+ return 0;
+
+err_slave_config:
+ devm_kfree(dev, dma->rx_dma_buf);
+err_rx_dma_buf:
+ devm_kfree(dev, dma->tx_dma_buf);
+err_tx_dma_buf:
+ dma_release_channel(dma->chan_tx);
+err_tx_channel:
+ dma_release_channel(dma->chan_rx);
+
+ devm_kfree(dev, dma);
+ dspi->dma = NULL;
+
+ return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+
+ if (dma) {
+ if (dma->chan_tx) {
+ dma_unmap_single(dev, dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_unmap_single(dev, dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_release_channel(dma->chan_rx);
+ }
+ }
+}
+
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate)
{
@@ -424,6 +709,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
dspi_tcfq_write(dspi);
break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ status = dspi_dma_xfer(dspi);
+ goto out;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -733,6 +1024,13 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_master_put;
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ if (dspi_request_dma(dspi, res->start)) {
+ dev_err(&pdev->dev, "can't get dma channels\n");
+ goto out_clk_put;
+ }
+ }
+
master->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
@@ -761,6 +1059,7 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
+ dspi_release_dma(dspi);
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
--
2.10.2
On Thu, Nov 10, 2016 at 05:49:15PM +0530, Sanchayan Maity wrote:
A couple of small things, please send followup patches fixing them.
> + rx_word = is_double_byte_mode(dspi);
> +
> + len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
Please use normal if statements, they're much easier to read.
> +err_slave_config:
> + devm_kfree(dev, dma->rx_dma_buf);
> +err_rx_dma_buf:
> + devm_kfree(dev, dma->tx_dma_buf);
You really shouldn't need to explicitly free things like this if you're
using devm_, especially in the error path from the probe function like
this, where a failure is just going to result in the device failing to
instantiate, so you won't have the allocation sitting around unused for
any length of time.
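For illustration, a minimal sketch of that error path, essentially what the
"Minor code cleanup and error path fixes" patch later in this thread ends up
doing: the buffers come from dma_alloc_coherent and so need dma_free_coherent,
while the devm_kzalloc'd struct can simply be left to devm:

err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	dspi->dma = NULL;

	return ret;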
The patch
spi: spi-fsl-dspi: Add DMA support for Vybrid
has been applied to the spi tree at
git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
All being well this means that it will be integrated into the linux-next
tree (usually sometime in the next 24 hours) and sent to Linus during
the next merge window (or sooner if it is a bug fix), however if
problems are discovered then the patch may be dropped or reverted.
You may get further e-mails resulting from automated or manual testing
and review of the tree, please engage with people reporting problems and
send followup patches addressing any issues that are reported if needed.
If any updates are required or you are submitting further changes they
should be sent as incremental updates against current git, existing
patches will not be replaced.
Please add any relevant lists and maintainers to the CCs when replying
to this mail.
Thanks,
Mark
From 90ba37033cb94207e97c4ced9be575770438213b Mon Sep 17 00:00:00 2001
From: Sanchayan Maity <[email protected]>
Date: Thu, 10 Nov 2016 17:49:15 +0530
Subject: [PATCH] spi: spi-fsl-dspi: Add DMA support for Vybrid
Add DMA support for Vybrid.
Signed-off-by: Sanchayan Maity <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 301 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 300 insertions(+), 1 deletion(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd945668..bc64700b514d 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -40,6 +42,7 @@
#define TRAN_STATE_WORD_ODD_NUM 0x04
#define DSPI_FIFO_SIZE 4
+#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
@@ -71,6 +74,11 @@
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000
@@ -108,6 +116,8 @@
#define SPI_TCR_TCNT_MAX 0x10000
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
struct chip_data {
u32 mcr_val;
u32 ctar_val;
@@ -117,6 +127,7 @@ struct chip_data {
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
DSPI_TCFQ_MODE,
+ DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
@@ -125,7 +136,7 @@ struct fsl_dspi_devtype_data {
};
static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_EOQ_MODE,
+ .trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
};
@@ -139,6 +150,22 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+struct fsl_dspi_dma {
+ u32 curr_xfer_len;
+
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
+};
+
struct fsl_dspi {
struct spi_master *master;
struct platform_device *pdev;
@@ -165,6 +192,7 @@ struct fsl_dspi {
u32 waitflags;
u32 spi_tcnt;
+ struct fsl_dspi_dma *dma;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
@@ -176,6 +204,263 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
+static void dspi_tx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int rx_word;
+ int i, len;
+ u16 d;
+
+ rx_word = is_double_byte_mode(dspi);
+
+ len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+ for (i = 0; i < len; i++) {
+ d = dspi->dma->rx_dma_buf[i];
+ rx_word ? (*(u16 *)dspi->rx = d) :
+ (*(u8 *)dspi->rx = d);
+ dspi->rx += rx_word + 1;
+ }
+ }
+
+ complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int time_left;
+ int tx_word;
+ int i, len;
+ u16 val;
+
+ tx_word = is_double_byte_mode(dspi);
+
+ len = tx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+
+ for (i = 0; i < len - 1; i++) {
+ val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ dspi->dma->tx_dma_buf[i] =
+ SPI_PUSHR_TXDATA(val) | SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0) | SPI_PUSHR_CONT;
+ dspi->tx += tx_word + 1;
+ }
+
+ val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0);
+ dspi->tx += tx_word + 1;
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+ dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->tx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = dspi_tx_dma_callback;
+ dma->tx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+ dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->rx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->rx_desc->callback = dspi_rx_dma_callback;
+ dma->rx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&dspi->dma->cmd_rx_complete);
+ reinit_completion(&dspi->dma->cmd_tx_complete);
+
+ dma_async_issue_pending(dma->chan_rx);
+ dma_async_issue_pending(dma->chan_tx);
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA tx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA rx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int curr_remaining_bytes;
+ int bytes_per_buffer;
+ int tx_word;
+ int ret = 0;
+
+ tx_word = is_double_byte_mode(dspi);
+ curr_remaining_bytes = dspi->len;
+ while (curr_remaining_bytes) {
+ /* Check if current transfer fits the DMA buffer */
+ dma->curr_xfer_len = curr_remaining_bytes;
+ bytes_per_buffer = DSPI_DMA_BUFSIZE /
+ (DSPI_FIFO_SIZE / (tx_word ? 2 : 1));
+ if (curr_remaining_bytes > bytes_per_buffer)
+ dma->curr_xfer_len = bytes_per_buffer;
+
+ ret = dspi_next_xfer_dma_submit(dspi);
+ if (ret) {
+ dev_err(dev, "DMA transfer failed\n");
+ goto exit;
+
+ } else {
+ curr_remaining_bytes -= dma->curr_xfer_len;
+ if (curr_remaining_bytes < 0)
+ curr_remaining_bytes = 0;
+ dspi->len = curr_remaining_bytes;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ struct fsl_dspi_dma *dma;
+ struct dma_slave_config cfg;
+ struct device *dev = &dspi->pdev->dev;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!dma->chan_rx) {
+ dev_err(dev, "rx dma channel not available\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ dma->chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!dma->chan_tx) {
+ dev_err(dev, "tx dma channel not available\n");
+ ret = -ENODEV;
+ goto err_tx_channel;
+ }
+
+ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->tx_dma_phys, GFP_KERNEL);
+ if (!dma->tx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_tx_dma_buf;
+ }
+
+ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->rx_dma_phys, GFP_KERNEL);
+ if (!dma->rx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_rx_dma_buf;
+ }
+
+ cfg.src_addr = phy_addr + SPI_POPR;
+ cfg.dst_addr = phy_addr + SPI_PUSHR;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure rx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure tx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ dspi->dma = dma;
+ init_completion(&dma->cmd_tx_complete);
+ init_completion(&dma->cmd_rx_complete);
+
+ return 0;
+
+err_slave_config:
+ devm_kfree(dev, dma->rx_dma_buf);
+err_rx_dma_buf:
+ devm_kfree(dev, dma->tx_dma_buf);
+err_tx_dma_buf:
+ dma_release_channel(dma->chan_tx);
+err_tx_channel:
+ dma_release_channel(dma->chan_rx);
+
+ devm_kfree(dev, dma);
+ dspi->dma = NULL;
+
+ return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+
+ if (dma) {
+ if (dma->chan_tx) {
+ dma_unmap_single(dev, dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_unmap_single(dev, dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_release_channel(dma->chan_rx);
+ }
+ }
+}
+
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate)
{
@@ -424,6 +709,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
dspi_tcfq_write(dspi);
break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ status = dspi_dma_xfer(dspi);
+ goto out;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -733,6 +1024,13 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_master_put;
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ if (dspi_request_dma(dspi, res->start)) {
+ dev_err(&pdev->dev, "can't get dma channels\n");
+ goto out_clk_put;
+ }
+ }
+
master->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
@@ -761,6 +1059,7 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
+ dspi_release_dma(dspi);
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
--
2.10.2
The current DMA implementation had a bug where the DMA transfer would
exit the loop in dspi_transfer_one_message after the completion of
a single transfer. This results in a multi-message transfer submitted
with SPI_IOC_MESSAGE terminating incorrectly without an error.
Signed-off-by: Sanchayan Maity <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index bc64700..b1ee1f5 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -714,7 +714,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
SPI_RSER_TFFFE | SPI_RSER_TFFFD |
SPI_RSER_RFDFE | SPI_RSER_RFDFD);
status = dspi_dma_xfer(dspi);
- goto out;
+ break;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -722,9 +722,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
- dspi->waitflags = 0;
+ if (trans_mode != DSPI_DMA_MODE) {
+ if (wait_event_interruptible(dspi->waitq,
+ dspi->waitflags))
+ dev_err(&dspi->pdev->dev,
+ "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
+ }
if (transfer->delay_usecs)
udelay(transfer->delay_usecs);
--
2.10.2
The current DMA implementation was not handling the continuous selection
format, viz. the SPI chip select would be deasserted even between sequential
serial transfers. Use the cs_change variable and correctly set or
reset the CONT bit for the case where peripherals require
the chip select to remain asserted between sequential transfers.
Signed-off-by: Sanchayan Maity <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index aee8c88..164e2e1 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -258,9 +258,16 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
}
val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
- dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
- SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(0);
+ if (dspi->cs_change) {
+ dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0);
+ } else {
+ dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(0) |
+ SPI_PUSHR_CONT;
+ }
dspi->tx += tx_word + 1;
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
--
2.10.2
Hello,
The following set of patches has fixes for the Vybrid SPI DMA
implementation along with some minor cleanups requested
at the time the v3 version of the SPI DMA support patch was accepted.
This series of patches is based on top of the branch topic/fsl-dspi.
http://git.kernel.org/cgit/linux/kernel/git/broonie/spi.git/log/?h=topic/fsl-dspi
The patches have been tested on a Toradex Colibri Vybrid VF61 module.
Thanks & Regards,
Sanchayan.
Sanchayan Maity (4):
spi: spi-fsl-dspi: Fix SPI transfer issue when using multiple SPI_IOC_MESSAGE
spi: spi-fsl-dspi: Fix incorrect DMA setup
spi: spi-fsl-dspi: Fix continuous selection format
spi: spi-fsl-dspi: Minor code cleanup and error path fixes
drivers/spi/spi-fsl-dspi.c | 69 ++++++++++++++++++++++++++++++++++------------
1 file changed, 51 insertions(+), 18 deletions(-)
--
2.10.2
Currently dmaengine_prep_slave_single was being called with length
set to the complete DMA buffer size. This resulted in unwanted bytes
being transferred to the SPI register, leading to the clock and MOSI lines
carrying unwanted data even after the chip select was deasserted and the
required bytes had been transferred.
Signed-off-by: Sanchayan Maity <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index b1ee1f5..aee8c88 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -265,7 +265,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES /
+ (tx_word ? 2 : 1),
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->tx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
@@ -281,7 +284,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
dma->rx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES /
+ (tx_word ? 2 : 1),
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->rx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
--
2.10.2
Code cleanup to improve readability, along with error path fixes
that remove the use of devm_kfree.
Signed-off-by: Sanchayan Maity <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 164e2e1..382a7f9 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -222,13 +222,18 @@ static void dspi_rx_dma_callback(void *arg)
rx_word = is_double_byte_mode(dspi);
- len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+ if (rx_word)
+ len = dma->curr_xfer_len / 2;
+ else
+ len = dma->curr_xfer_len;
if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
for (i = 0; i < len; i++) {
d = dspi->dma->rx_dma_buf[i];
- rx_word ? (*(u16 *)dspi->rx = d) :
- (*(u8 *)dspi->rx = d);
+ if (rx_word)
+ *(u16 *)dspi->rx = d;
+ else
+ *(u8 *)dspi->rx = d;
dspi->rx += rx_word + 1;
}
}
@@ -247,17 +252,27 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
tx_word = is_double_byte_mode(dspi);
- len = tx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
+ if (tx_word)
+ len = dma->curr_xfer_len / 2;
+ else
+ len = dma->curr_xfer_len;
for (i = 0; i < len - 1; i++) {
- val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ if (tx_word)
+ val = *(u16 *) dspi->tx;
+ else
+ val = *(u8 *) dspi->tx;
dspi->dma->tx_dma_buf[i] =
SPI_PUSHR_TXDATA(val) | SPI_PUSHR_PCS(dspi->cs) |
SPI_PUSHR_CTAS(0) | SPI_PUSHR_CONT;
dspi->tx += tx_word + 1;
}
- val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
+ if (tx_word)
+ val = *(u16 *) dspi->tx;
+ else
+ val = *(u8 *) dspi->tx;
+
if (dspi->cs_change) {
dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
SPI_PUSHR_PCS(dspi->cs) |
@@ -440,15 +455,16 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return 0;
err_slave_config:
- devm_kfree(dev, dma->rx_dma_buf);
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
- devm_kfree(dev, dma->tx_dma_buf);
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
dma_release_channel(dma->chan_rx);
- devm_kfree(dev, dma);
dspi->dma = NULL;
return ret;
--
2.10.2
On 2016-11-17 04:16, Sanchayan Maity wrote:
> Current DMA implementation had a bug where the DMA transfer would
> exit the loop in dspi_transfer_one_message after the completion of
> a single transfer. This results in a multi message transfer submitted
> with SPI_IOC_MESSAGE to terminate incorrectly without an error.
Looks good to me:
Reviewed-by: Stefan Agner <[email protected]>
>
> Signed-off-by: Sanchayan Maity <[email protected]>
> ---
> drivers/spi/spi-fsl-dspi.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index bc64700..b1ee1f5 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -714,7 +714,7 @@ static int dspi_transfer_one_message(struct
> spi_master *master,
> SPI_RSER_TFFFE | SPI_RSER_TFFFD |
> SPI_RSER_RFDFE | SPI_RSER_RFDFD);
> status = dspi_dma_xfer(dspi);
> - goto out;
> + break;
> default:
> dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
> trans_mode);
> @@ -722,9 +722,13 @@ static int dspi_transfer_one_message(struct
> spi_master *master,
> goto out;
> }
>
> - if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
> - dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
> - dspi->waitflags = 0;
> + if (trans_mode != DSPI_DMA_MODE) {
> + if (wait_event_interruptible(dspi->waitq,
> + dspi->waitflags))
> + dev_err(&dspi->pdev->dev,
> + "wait transfer complete fail!\n");
> + dspi->waitflags = 0;
> + }
>
> if (transfer->delay_usecs)
> udelay(transfer->delay_usecs);
On 2016-11-17 04:16, Sanchayan Maity wrote:
> Currently dmaengine_prep_slave_single was being called with length
> set to the complete DMA buffer size. This resulted in unwanted bytes
> being transferred to the SPI register leading to clock and MOSI lines
> having unwanted data even after chip select got deasserted and the
> required bytes having been transferred.
>
> Signed-off-by: Sanchayan Maity <[email protected]>
> ---
> drivers/spi/spi-fsl-dspi.c | 10 ++++++++--
> 1 file changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index b1ee1f5..aee8c88 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -265,7 +265,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>
> dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
> dma->tx_dma_phys,
> - DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
> + dma->curr_xfer_len *
> + DMA_SLAVE_BUSWIDTH_4_BYTES /
> + (tx_word ? 2 : 1),
> + DMA_MEM_TO_DEV,
Hm, this is getting ridiculous, I think we convert curr_xfer_len from
bytes to DMA transfers in almost every use.
Can we make it the transfer length in actual 4-byte transfers? We then
probably have to convert it to bytes once to subtract from
curr_remaining_bytes, but I think it would simplify code overall...
--
Stefan
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->tx_desc) {
> dev_err(dev, "Not able to get desc for DMA xfer\n");
> @@ -281,7 +284,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>
> dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
> dma->rx_dma_phys,
> - DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
> + dma->curr_xfer_len *
> + DMA_SLAVE_BUSWIDTH_4_BYTES /
> + (tx_word ? 2 : 1),
> + DMA_DEV_TO_MEM,
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->rx_desc) {
> dev_err(dev, "Not able to get desc for DMA xfer\n");
On 2016-11-17 04:16, Sanchayan Maity wrote:
> Current DMA implementation was not handling the continuous selection
> format viz. SPI chip select would be deasserted even between sequential
> serial transfers. Use the cs_change variable and correctly set or
> reset the CONT bit accordingly for case where peripherals require
> the chip select to be asserted between sequential transfers.
>
> Signed-off-by: Sanchayan Maity <[email protected]>
> ---
> drivers/spi/spi-fsl-dspi.c | 13 ++++++++++---
> 1 file changed, 10 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index aee8c88..164e2e1 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -258,9 +258,16 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> }
>
> val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
> - dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> - SPI_PUSHR_PCS(dspi->cs) |
> - SPI_PUSHR_CTAS(0);
> + if (dspi->cs_change) {
> + dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> + SPI_PUSHR_PCS(dspi->cs) |
> + SPI_PUSHR_CTAS(0);
> + } else {
> + dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> + SPI_PUSHR_PCS(dspi->cs) |
> + SPI_PUSHR_CTAS(0) |
> + SPI_PUSHR_CONT;
> + }
How about:
dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
SPI_PUSHR_PCS(dspi->cs) |
SPI_PUSHR_CTAS(0);
+ if (dspi->cs_change)
+ dspi->dma->tx_dma_buf[i] |= SPI_PUSHR_CONT;
Avoids code duplication...
--
Stefan
> dspi->tx += tx_word + 1;
>
> dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
Hello Stefan,
On 16-11-17 17:07:24, Stefan Agner wrote:
> On 2016-11-17 04:16, Sanchayan Maity wrote:
> > Current DMA implementation was not handling the continuous selection
> > format viz. SPI chip select would be deasserted even between sequential
> > serial transfers. Use the cs_change variable and correctly set or
> > reset the CONT bit accordingly for case where peripherals require
> > the chip select to be asserted between sequential transfers.
> >
> > Signed-off-by: Sanchayan Maity <[email protected]>
> > ---
> > drivers/spi/spi-fsl-dspi.c | 13 ++++++++++---
> > 1 file changed, 10 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> > index aee8c88..164e2e1 100644
> > --- a/drivers/spi/spi-fsl-dspi.c
> > +++ b/drivers/spi/spi-fsl-dspi.c
> > @@ -258,9 +258,16 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> > }
> >
> > val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
> > - dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> > - SPI_PUSHR_PCS(dspi->cs) |
> > - SPI_PUSHR_CTAS(0);
> > + if (dspi->cs_change) {
> > + dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> > + SPI_PUSHR_PCS(dspi->cs) |
> > + SPI_PUSHR_CTAS(0);
> > + } else {
> > + dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> > + SPI_PUSHR_PCS(dspi->cs) |
> > + SPI_PUSHR_CTAS(0) |
> > + SPI_PUSHR_CONT;
> > + }
>
> How about:
>
>
> dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
> SPI_PUSHR_PCS(dspi->cs) |
> SPI_PUSHR_CTAS(0);
>
> + if (dspi->cs_change)
> + dspi->dma->tx_dma_buf[i] |= SPI_PUSHR_CONT;
>
>
> Avoids code duplication...
Agreed. It's much better. Should be !dspi->cs_change though.
Will include it in next iteration.
Thanks & Regards,
Sanchayan.
>
> --
> Stefan
>
>
>
> > dspi->tx += tx_word + 1;
> >
> > dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
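Putting that together with the sign fixed, the next iteration would look
roughly like this (a sketch, not the final posted patch):

	val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
	dspi->dma->tx_dma_buf[i] = SPI_PUSHR_TXDATA(val) |
				   SPI_PUSHR_PCS(dspi->cs) |
				   SPI_PUSHR_CTAS(0);
	/* Keep chip select asserted unless cs_change was requested */
	if (!dspi->cs_change)
		dspi->dma->tx_dma_buf[i] |= SPI_PUSHR_CONT;
	dspi->tx += tx_word + 1;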
On 16-11-17 17:03:19, Stefan Agner wrote:
> On 2016-11-17 04:16, Sanchayan Maity wrote:
> > Currently dmaengine_prep_slave_single was being called with length
> > set to the complete DMA buffer size. This resulted in unwanted bytes
> > being transferred to the SPI register leading to clock and MOSI lines
> > having unwanted data even after chip select got deasserted and the
> > required bytes having been transferred.
> >
> > Signed-off-by: Sanchayan Maity <[email protected]>
> > ---
> > drivers/spi/spi-fsl-dspi.c | 10 ++++++++--
> > 1 file changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> > index b1ee1f5..aee8c88 100644
> > --- a/drivers/spi/spi-fsl-dspi.c
> > +++ b/drivers/spi/spi-fsl-dspi.c
> > @@ -265,7 +265,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> >
> > dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
> > dma->tx_dma_phys,
> > - DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
> > + dma->curr_xfer_len *
> > + DMA_SLAVE_BUSWIDTH_4_BYTES /
> > + (tx_word ? 2 : 1),
> > + DMA_MEM_TO_DEV,
>
> Hm, this is getting ridiculous, I think we convert curr_xfer_len from
> bytes to DMA transfers in almost every use.
>
> Can we make it be transfer length in actual 4 byte transfers? We then
> probably have to convert it to bytes once to subtract from
> curr_remaining_bytes, but I think it would simplify code overall...
Will the below be an acceptable fix?
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 41422cd..db7f091 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -217,15 +217,13 @@ static void dspi_rx_dma_callback(void *arg)
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
int rx_word;
- int i, len;
+ int i;
u16 d;
rx_word = is_double_byte_mode(dspi);
- len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
-
if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
- for (i = 0; i < len; i++) {
+ for (i = 0; i < dma->curr_xfer_len; i++) {
d = dspi->dma->rx_dma_buf[i];
rx_word ? (*(u16 *)dspi->rx = d) :
(*(u8 *)dspi->rx = d);
@@ -242,14 +240,12 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
struct device *dev = &dspi->pdev->dev;
int time_left;
int tx_word;
- int i, len;
+ int i;
u16 val;
tx_word = is_double_byte_mode(dspi);
- len = tx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
-
- for (i = 0; i < len - 1; i++) {
+ for (i = 0; i < dma->curr_xfer_len - 1; i++) {
val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
dspi->dma->tx_dma_buf[i] =
SPI_PUSHR_TXDATA(val) | SPI_PUSHR_PCS(dspi->cs) |
@@ -267,7 +263,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->tx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
@@ -283,7 +281,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
dma->rx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->rx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
@@ -330,17 +330,17 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
struct device *dev = &dspi->pdev->dev;
int curr_remaining_bytes;
int bytes_per_buffer;
- int tx_word;
+ int tx_word = 1;
int ret = 0;
- tx_word = is_double_byte_mode(dspi);
+ if (is_double_byte_mode(dspi))
+ tx_word = 2;
curr_remaining_bytes = dspi->len;
+ bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
while (curr_remaining_bytes) {
/* Check if current transfer fits the DMA buffer */
- dma->curr_xfer_len = curr_remaining_bytes;
- bytes_per_buffer = DSPI_DMA_BUFSIZE /
- (DSPI_FIFO_SIZE / (tx_word ? 2 : 1));
- if (curr_remaining_bytes > bytes_per_buffer)
+ dma->curr_xfer_len = curr_remaining_bytes / tx_word;
+ if (dma->curr_xfer_len > bytes_per_buffer)
dma->curr_xfer_len = bytes_per_buffer;
ret = dspi_next_xfer_dma_submit(dspi);
@@ -349,7 +349,7 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
goto exit;
} else {
- curr_remaining_bytes -= dma->curr_xfer_len;
+ curr_remaining_bytes -= dma->curr_xfer_len * tx_word;
if (curr_remaining_bytes < 0)
curr_remaining_bytes = 0;
dspi->len = curr_remaining_bytes;
Regards,
Sanchayan.
>
> --
> Stefan
>
>
> > DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> > if (!dma->tx_desc) {
> > dev_err(dev, "Not able to get desc for DMA xfer\n");
> > @@ -281,7 +284,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> >
> > dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
> > dma->rx_dma_phys,
> > - DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
> > + dma->curr_xfer_len *
> > + DMA_SLAVE_BUSWIDTH_4_BYTES /
> > + (tx_word ? 2 : 1),
> > + DMA_DEV_TO_MEM,
> > DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> > if (!dma->rx_desc) {
> > dev_err(dev, "Not able to get desc for DMA xfer\n");
On 2016-11-18 00:04, [email protected] wrote:
> On 16-11-17 17:03:19, Stefan Agner wrote:
>> On 2016-11-17 04:16, Sanchayan Maity wrote:
>> > Currently dmaengine_prep_slave_single was being called with length
>> > set to the complete DMA buffer size. This resulted in unwanted bytes
>> > being transferred to the SPI register leading to clock and MOSI lines
>> > having unwanted data even after chip select got deasserted and the
>> > required bytes having been transferred.
>> >
>> > Signed-off-by: Sanchayan Maity <[email protected]>
>> > ---
>> > drivers/spi/spi-fsl-dspi.c | 10 ++++++++--
>> > 1 file changed, 8 insertions(+), 2 deletions(-)
>> >
>> > diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
>> > index b1ee1f5..aee8c88 100644
>> > --- a/drivers/spi/spi-fsl-dspi.c
>> > +++ b/drivers/spi/spi-fsl-dspi.c
>> > @@ -265,7 +265,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>> >
>> > dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
>> > dma->tx_dma_phys,
>> > - DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
>> > + dma->curr_xfer_len *
>> > + DMA_SLAVE_BUSWIDTH_4_BYTES /
>> > + (tx_word ? 2 : 1),
>> > + DMA_MEM_TO_DEV,
>>
>> Hm, this is getting ridiculous, I think we convert curr_xfer_len from
>> bytes to DMA transfers in almost every use.
>>
>> Can we make it be transfer length in actual 4 byte transfers? We then
>> probably have to convert it to bytes once to subtract from
>> curr_remaining_bytes, but I think it would simplify code overall...
>
> Will the below be acceptable fix?
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index 41422cd..db7f091 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -217,15 +217,13 @@ static void dspi_rx_dma_callback(void *arg)
> struct fsl_dspi *dspi = arg;
> struct fsl_dspi_dma *dma = dspi->dma;
> int rx_word;
> - int i, len;
> + int i;
> u16 d;
>
> rx_word = is_double_byte_mode(dspi);
>
> - len = rx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
> -
> if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
> - for (i = 0; i < len; i++) {
> + for (i = 0; i < dma->curr_xfer_len; i++) {
> d = dspi->dma->rx_dma_buf[i];
> rx_word ? (*(u16 *)dspi->rx = d) :
> (*(u8 *)dspi->rx = d);
> @@ -242,14 +240,12 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> struct device *dev = &dspi->pdev->dev;
> int time_left;
> int tx_word;
> - int i, len;
> + int i;
> u16 val;
>
> tx_word = is_double_byte_mode(dspi);
>
> - len = tx_word ? (dma->curr_xfer_len / 2) : dma->curr_xfer_len;
> -
> - for (i = 0; i < len - 1; i++) {
> + for (i = 0; i < dma->curr_xfer_len - 1; i++) {
> val = tx_word ? *(u16 *) dspi->tx : *(u8 *) dspi->tx;
> dspi->dma->tx_dma_buf[i] =
> SPI_PUSHR_TXDATA(val) | SPI_PUSHR_PCS(dspi->cs) |
> @@ -267,7 +263,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>
> dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
> dma->tx_dma_phys,
> - DSPI_DMA_BUFSIZE, DMA_MEM_TO_DEV,
> + dma->curr_xfer_len *
> + DMA_SLAVE_BUSWIDTH_4_BYTES,
> + DMA_MEM_TO_DEV,
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->tx_desc) {
> dev_err(dev, "Not able to get desc for DMA xfer\n");
> @@ -283,7 +281,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>
> dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
> dma->rx_dma_phys,
> - DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
> + dma->curr_xfer_len *
> + DMA_SLAVE_BUSWIDTH_4_BYTES,
> + DMA_DEV_TO_MEM,
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->rx_desc) {
> dev_err(dev, "Not able to get desc for DMA xfer\n");
> @@ -330,17 +330,17 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
> struct device *dev = &dspi->pdev->dev;
> int curr_remaining_bytes;
> int bytes_per_buffer;
> - int tx_word;
> + int tx_word = 1;
> int ret = 0;
>
> - tx_word = is_double_byte_mode(dspi);
> + if (is_double_byte_mode(dspi))
> + tx_word = 2;
I would try to be consistent with tx_word. In most other cases it is
used as a boolean, indicating whether this is a 2 byte word or not.
Here you change it to represent the length of a single transfer/frame.
Nothing wrong with that, but if you make such a change, also change the
name of the variable so the reader does not get misled by the other
uses of "tx_word"...
But otherwise it looks good, I like this variant much better!
Maybe we should add a comment in struct fsl_dspi_dma:
/* Length of transfer in words of DSPI_FIFO_SIZE */
--
Stefan
> curr_remaining_bytes = dspi->len;
> + bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
> while (curr_remaining_bytes) {
> /* Check if current transfer fits the DMA buffer */
> - dma->curr_xfer_len = curr_remaining_bytes;
> - bytes_per_buffer = DSPI_DMA_BUFSIZE /
> - (DSPI_FIFO_SIZE / (tx_word ? 2 : 1));
> - if (curr_remaining_bytes > bytes_per_buffer)
> + dma->curr_xfer_len = curr_remaining_bytes / tx_word;
> + if (dma->curr_xfer_len > bytes_per_buffer)
> dma->curr_xfer_len = bytes_per_buffer;
>
> ret = dspi_next_xfer_dma_submit(dspi);
> @@ -349,7 +349,7 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
> goto exit;
>
> } else {
> - curr_remaining_bytes -= dma->curr_xfer_len;
> + curr_remaining_bytes -= dma->curr_xfer_len * tx_word;
> if (curr_remaining_bytes < 0)
> curr_remaining_bytes = 0;
> dspi->len = curr_remaining_bytes;
>
>
> Regards,
> Sanchayan.
>>
>> --
>> Stefan
>>
>>
>> > DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
>> > if (!dma->tx_desc) {
>> > dev_err(dev, "Not able to get desc for DMA xfer\n");
>> > @@ -281,7 +284,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>> >
>> > dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
>> > dma->rx_dma_phys,
>> > - DSPI_DMA_BUFSIZE, DMA_DEV_TO_MEM,
>> > + dma->curr_xfer_len *
>> > + DMA_SLAVE_BUSWIDTH_4_BYTES /
>> > + (tx_word ? 2 : 1),
>> > + DMA_DEV_TO_MEM,
>> > DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
>> > if (!dma->rx_desc) {
>> > dev_err(dev, "Not able to get desc for DMA xfer\n");
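Folding in both suggestions, i.e. the word-based curr_xfer_len from the diff
above together with a clearer name for the word size (bytes_per_word and
words_per_buffer are only illustrative names, not taken from the posted
patch), dspi_dma_xfer could look roughly like this:

static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int curr_remaining_bytes;
	int words_per_buffer;
	int bytes_per_word = is_double_byte_mode(dspi) ? 2 : 1;
	int ret = 0;

	curr_remaining_bytes = dspi->len;
	/* Each 4-byte PUSHR entry carries one frame; the buffer holds this many */
	words_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* curr_xfer_len counts words of DSPI_FIFO_SIZE, capped to the buffer */
		dma->curr_xfer_len = curr_remaining_bytes / bytes_per_word;
		if (dma->curr_xfer_len > words_per_buffer)
			dma->curr_xfer_len = words_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}

		curr_remaining_bytes -= dma->curr_xfer_len * bytes_per_word;
		if (curr_remaining_bytes < 0)
			curr_remaining_bytes = 0;
		dspi->len = curr_remaining_bytes;
	}

	return ret;
}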
The patch
spi: spi-fsl-dspi: Fix SPI transfer issue when using multiple SPI_IOC_MESSAGE
has been applied to the spi tree at
git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
All being well this means that it will be integrated into the linux-next
tree (usually sometime in the next 24 hours) and sent to Linus during
the next merge window (or sooner if it is a bug fix), however if
problems are discovered then the patch may be dropped or reverted.
You may get further e-mails resulting from automated or manual testing
and review of the tree, please engage with people reporting problems and
send followup patches addressing any issues that are reported if needed.
If any updates are required or you are submitting further changes they
should be sent as incremental updates against current git, existing
patches will not be replaced.
Please add any relevant lists and maintainers to the CCs when replying
to this mail.
Thanks,
Mark
From 9811430465fccae17862213d07eba017c149eb9c Mon Sep 17 00:00:00 2001
From: Sanchayan Maity <[email protected]>
Date: Thu, 17 Nov 2016 17:46:48 +0530
Subject: [PATCH] spi: spi-fsl-dspi: Fix SPI transfer issue when using multiple
SPI_IOC_MESSAGE
The current DMA implementation had a bug where the DMA transfer would
exit the loop in dspi_transfer_one_message after the completion of
a single transfer. This results in a multi-message transfer submitted
with SPI_IOC_MESSAGE terminating incorrectly without an error.
Signed-off-by: Sanchayan Maity <[email protected]>
Reviewed-by: Stefan Agner <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
---
drivers/spi/spi-fsl-dspi.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index bc64700b514d..b1ee1f521ba0 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -714,7 +714,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
SPI_RSER_TFFFE | SPI_RSER_TFFFD |
SPI_RSER_RFDFE | SPI_RSER_RFDFD);
status = dspi_dma_xfer(dspi);
- goto out;
+ break;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -722,9 +722,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
- dspi->waitflags = 0;
+ if (trans_mode != DSPI_DMA_MODE) {
+ if (wait_event_interruptible(dspi->waitq,
+ dspi->waitflags))
+ dev_err(&dspi->pdev->dev,
+ "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
+ }
if (transfer->delay_usecs)
udelay(transfer->delay_usecs);
--
2.10.2