2024-04-23 12:16:29

by Marek Szyprowski

Subject: [PATCH] serial: msm: Unify TX and RX DMA paths

Use the scatterlist-based API for the RX path as well, unifying the TX
and RX DMA paths and simplifying the driver code a bit.

Signed-off-by: Marek Szyprowski <[email protected]>
---
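For reviewers, a rough sketch of the common flow both directions end up
sharing after this patch (illustrative only, not part of the diff; the
helper name is made up, and locking, DMEN handling and the software-mode
fallback are left out):

/*
 * Simplified sketch of the mapping/submission flow shared by TX and RX
 * after this change.  The helper name is invented for illustration.
 */
static int msm_dma_submit_sketch(struct uart_port *port, struct msm_dma *dma,
				 void *buf, unsigned int len,
				 enum dma_transfer_direction xfer_dir)
{
	/* Describe the single buffer with a one-entry scatterlist. */
	sg_init_one(&dma->sg, buf, len);

	if (!dma_map_sg(port->dev, &dma->sg, 1, dma->dir))
		return -EIO;

	/* TX additionally passes DMA_PREP_FENCE here. */
	dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->sg, 1,
					    xfer_dir, DMA_PREP_INTERRUPT);
	if (!dma->desc) {
		dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
		/* sg_dma_len() == 0 marks the channel as idle again. */
		sg_init_table(&dma->sg, 1);
		return -EIO;
	}

	dma->cookie = dmaengine_submit(dma->desc);
	dma_async_issue_pending(dma->chan);
	return 0;
}

The TX side still builds its scatterlist from the kfifo with
kfifo_dma_out_prepare() instead of sg_init_one(), but the mapping,
submission and teardown steps are now identical to RX.
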
drivers/tty/serial/msm_serial.c | 86 +++++++++++++--------------------
1 file changed, 34 insertions(+), 52 deletions(-)

diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 0a9c5219df88..d5e00e613f73 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -161,14 +161,8 @@ enum {
struct msm_dma {
struct dma_chan *chan;
enum dma_data_direction dir;
- union {
- struct {
- dma_addr_t phys;
- unsigned char *virt;
- unsigned int count;
- } rx;
- struct scatterlist tx_sg;
- };
+ unsigned char *virt;
+ struct scatterlist sg;
dma_cookie_t cookie;
u32 enable_bit;
struct dma_async_tx_descriptor *desc;
@@ -254,13 +248,7 @@ static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
unsigned int mapped;
u32 val;

- if (dma->dir == DMA_TO_DEVICE) {
- mapped = sg_dma_len(&dma->tx_sg);
- } else {
- mapped = dma->rx.count;
- dma->rx.count = 0;
- }
-
+ mapped = sg_dma_len(&dma->sg);
dmaengine_terminate_all(dma->chan);

/*
@@ -275,11 +263,8 @@ static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
msm_write(port, val, UARTDM_DMEN);

if (mapped) {
- if (dma->dir == DMA_TO_DEVICE) {
- dma_unmap_sg(dev, &dma->tx_sg, 1, dma->dir);
- sg_init_table(&dma->tx_sg, 1);
- } else
- dma_unmap_single(dev, dma->rx.phys, mapped, dma->dir);
+ dma_unmap_sg(dev, &dma->sg, 1, dma->dir);
+ sg_init_table(&dma->sg, 1);
}
}

@@ -299,7 +284,7 @@ static void msm_release_dma(struct msm_port *msm_port)
if (dma->chan) {
msm_stop_dma(&msm_port->uart, dma);
dma_release_channel(dma->chan);
- kfree(dma->rx.virt);
+ kfree(dma->virt);
}

memset(dma, 0, sizeof(*dma));
@@ -371,8 +356,8 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)

of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);

- dma->rx.virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
- if (!dma->rx.virt)
+ dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
+ if (!dma->virt)
goto rel_rx;

memset(&conf, 0, sizeof(conf));
@@ -399,7 +384,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)

return;
err:
- kfree(dma->rx.virt);
+ kfree(dma->virt);
rel_rx:
dma_release_channel(dma->chan);
no_rx:
@@ -434,7 +419,7 @@ static void msm_start_tx(struct uart_port *port)
struct msm_dma *dma = &msm_port->tx_dma;

/* Already started in DMA mode */
- if (sg_dma_len(&dma->tx_sg))
+ if (sg_dma_len(&dma->sg))
return;

msm_port->imr |= MSM_UART_IMR_TXLEV;
@@ -462,12 +447,12 @@ static void msm_complete_tx_dma(void *args)
uart_port_lock_irqsave(port, &flags);

/* Already stopped */
- if (!sg_dma_len(&dma->tx_sg))
+ if (!sg_dma_len(&dma->sg))
goto done;

dmaengine_tx_status(dma->chan, dma->cookie, &state);

- dma_unmap_sg(port->dev, &dma->tx_sg, 1, dma->dir);
+ dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);

val = msm_read(port, UARTDM_DMEN);
val &= ~dma->enable_bit;
@@ -478,9 +463,9 @@ static void msm_complete_tx_dma(void *args)
msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
}

- count = sg_dma_len(&dma->tx_sg) - state.residue;
+ count = sg_dma_len(&dma->sg) - state.residue;
uart_xmit_advance(port, count);
- sg_init_table(&dma->tx_sg, 1);
+ sg_init_table(&dma->sg, 1);

/* Restore "Tx FIFO below watermark" interrupt */
msm_port->imr |= MSM_UART_IMR_TXLEV;
@@ -503,16 +488,16 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
int ret;
u32 val;

- sg_init_table(&dma->tx_sg, 1);
- kfifo_dma_out_prepare(&tport->xmit_fifo, &dma->tx_sg, 1, count);
+ sg_init_table(&dma->sg, 1);
+ kfifo_dma_out_prepare(&tport->xmit_fifo, &dma->sg, 1, count);

- mapped = dma_map_sg(port->dev, &dma->tx_sg, 1, dma->dir);
+ mapped = dma_map_sg(port->dev, &dma->sg, 1, dma->dir);
if (!mapped) {
ret = -EIO;
goto zero_sg;
}

- dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->tx_sg, 1,
+ dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->sg, 1,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT |
DMA_PREP_FENCE);
@@ -550,9 +535,9 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
dma_async_issue_pending(dma->chan);
return 0;
unmap:
- dma_unmap_sg(port->dev, &dma->tx_sg, 1, dma->dir);
+ dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
zero_sg:
- sg_init_table(&dma->tx_sg, 1);
+ sg_init_table(&dma->sg, 1);
return ret;
}

@@ -569,7 +554,7 @@ static void msm_complete_rx_dma(void *args)
uart_port_lock_irqsave(port, &flags);

/* Already stopped */
- if (!dma->rx.count)
+ if (!sg_dma_len(&dma->sg))
goto done;

val = msm_read(port, UARTDM_DMEN);
@@ -586,14 +571,13 @@ static void msm_complete_rx_dma(void *args)

port->icount.rx += count;

- dma->rx.count = 0;
-
- dma_unmap_single(port->dev, dma->rx.phys, UARTDM_RX_SIZE, dma->dir);
+ dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
+ sg_init_table(&dma->sg, 1);

for (i = 0; i < count; i++) {
char flag = TTY_NORMAL;

- if (msm_port->break_detected && dma->rx.virt[i] == 0) {
+ if (msm_port->break_detected && dma->virt[i] == 0) {
port->icount.brk++;
flag = TTY_BREAK;
msm_port->break_detected = false;
@@ -604,9 +588,9 @@ static void msm_complete_rx_dma(void *args)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;

- sysrq = uart_prepare_sysrq_char(port, dma->rx.virt[i]);
+ sysrq = uart_prepare_sysrq_char(port, dma->virt[i]);
if (!sysrq)
- tty_insert_flip_char(tport, dma->rx.virt[i], flag);
+ tty_insert_flip_char(tport, dma->virt[i], flag);
}

msm_start_rx_dma(msm_port);
@@ -630,14 +614,13 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
if (!dma->chan)
return;

- dma->rx.phys = dma_map_single(uart->dev, dma->rx.virt,
- UARTDM_RX_SIZE, dma->dir);
- ret = dma_mapping_error(uart->dev, dma->rx.phys);
- if (ret)
+ sg_init_one(&dma->sg, dma->virt, UARTDM_RX_SIZE);
+ ret = dma_map_sg(uart->dev, &dma->sg, 1, dma->dir);
+ if (!ret)
goto sw_mode;

- dma->desc = dmaengine_prep_slave_single(dma->chan, dma->rx.phys,
- UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
+ dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->sg, 1,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!dma->desc)
goto unmap;
@@ -664,8 +647,6 @@ static void msm_start_rx_dma(struct msm_port *msm_port)

msm_write(uart, msm_port->imr, MSM_UART_IMR);

- dma->rx.count = UARTDM_RX_SIZE;
-
dma_async_issue_pending(dma->chan);

msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
@@ -684,7 +665,8 @@ static void msm_start_rx_dma(struct msm_port *msm_port)

return;
unmap:
- dma_unmap_single(uart->dev, dma->rx.phys, UARTDM_RX_SIZE, dma->dir);
+ dma_unmap_sg(uart->dev, &dma->sg, 1, dma->dir);
+ sg_init_table(&dma->sg, 1);

sw_mode:
/*
@@ -967,7 +949,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
}

if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
- if (dma->rx.count) {
+ if (sg_dma_len(&dma->sg)) {
val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
msm_write(port, val, MSM_UART_CR);
val = MSM_UART_CR_CMD_RESET_STALE_INT;
--
2.34.1



2024-04-23 14:09:38

by Neil Armstrong

Subject: Re: [PATCH] serial: msm: Unify TX and RX DMA paths

Hi Marek,

On 23/04/2024 14:08, Marek Szyprowski wrote:
> Use the scatterlist-based API for the RX path as well, unifying the TX
> and RX DMA paths and simplifying the driver code a bit.

Thanks for the patch; I have no idea whether this is right or wrong.

Greg, I think we should wait until this change is fully tested on multiple
platforms, including DMA usage (Bluetooth), before applying it.

Thanks,
Neil

>
> Signed-off-by: Marek Szyprowski <[email protected]>
> ---
> drivers/tty/serial/msm_serial.c | 86 +++++++++++++--------------------
> 1 file changed, 34 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
> index 0a9c5219df88..d5e00e613f73 100644
> --- a/drivers/tty/serial/msm_serial.c
> +++ b/drivers/tty/serial/msm_serial.c
> @@ -161,14 +161,8 @@ enum {
> struct msm_dma {
> struct dma_chan *chan;
> enum dma_data_direction dir;
> - union {
> - struct {
> - dma_addr_t phys;
> - unsigned char *virt;
> - unsigned int count;
> - } rx;
> - struct scatterlist tx_sg;
> - };
> + unsigned char *virt;
> + struct scatterlist sg;
> dma_cookie_t cookie;
> u32 enable_bit;
> struct dma_async_tx_descriptor *desc;
> @@ -254,13 +248,7 @@ static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
> unsigned int mapped;
> u32 val;
>
> - if (dma->dir == DMA_TO_DEVICE) {
> - mapped = sg_dma_len(&dma->tx_sg);
> - } else {
> - mapped = dma->rx.count;
> - dma->rx.count = 0;
> - }
> -
> + mapped = sg_dma_len(&dma->sg);
> dmaengine_terminate_all(dma->chan);
>
> /*
> @@ -275,11 +263,8 @@ static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
> msm_write(port, val, UARTDM_DMEN);
>
> if (mapped) {
> - if (dma->dir == DMA_TO_DEVICE) {
> - dma_unmap_sg(dev, &dma->tx_sg, 1, dma->dir);
> - sg_init_table(&dma->tx_sg, 1);
> - } else
> - dma_unmap_single(dev, dma->rx.phys, mapped, dma->dir);
> + dma_unmap_sg(dev, &dma->sg, 1, dma->dir);
> + sg_init_table(&dma->sg, 1);
> }
> }
>
> @@ -299,7 +284,7 @@ static void msm_release_dma(struct msm_port *msm_port)
> if (dma->chan) {
> msm_stop_dma(&msm_port->uart, dma);
> dma_release_channel(dma->chan);
> - kfree(dma->rx.virt);
> + kfree(dma->virt);
> }
>
> memset(dma, 0, sizeof(*dma));
> @@ -371,8 +356,8 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
>
> of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
>
> - dma->rx.virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
> - if (!dma->rx.virt)
> + dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
> + if (!dma->virt)
> goto rel_rx;
>
> memset(&conf, 0, sizeof(conf));
> @@ -399,7 +384,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
>
> return;
> err:
> - kfree(dma->rx.virt);
> + kfree(dma->virt);
> rel_rx:
> dma_release_channel(dma->chan);
> no_rx:
> @@ -434,7 +419,7 @@ static void msm_start_tx(struct uart_port *port)
> struct msm_dma *dma = &msm_port->tx_dma;
>
> /* Already started in DMA mode */
> - if (sg_dma_len(&dma->tx_sg))
> + if (sg_dma_len(&dma->sg))
> return;
>
> msm_port->imr |= MSM_UART_IMR_TXLEV;
> @@ -462,12 +447,12 @@ static void msm_complete_tx_dma(void *args)
> uart_port_lock_irqsave(port, &flags);
>
> /* Already stopped */
> - if (!sg_dma_len(&dma->tx_sg))
> + if (!sg_dma_len(&dma->sg))
> goto done;
>
> dmaengine_tx_status(dma->chan, dma->cookie, &state);
>
> - dma_unmap_sg(port->dev, &dma->tx_sg, 1, dma->dir);
> + dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
>
> val = msm_read(port, UARTDM_DMEN);
> val &= ~dma->enable_bit;
> @@ -478,9 +463,9 @@ static void msm_complete_tx_dma(void *args)
> msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
> }
>
> - count = sg_dma_len(&dma->tx_sg) - state.residue;
> + count = sg_dma_len(&dma->sg) - state.residue;
> uart_xmit_advance(port, count);
> - sg_init_table(&dma->tx_sg, 1);
> + sg_init_table(&dma->sg, 1);
>
> /* Restore "Tx FIFO below watermark" interrupt */
> msm_port->imr |= MSM_UART_IMR_TXLEV;
> @@ -503,16 +488,16 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
> int ret;
> u32 val;
>
> - sg_init_table(&dma->tx_sg, 1);
> - kfifo_dma_out_prepare(&tport->xmit_fifo, &dma->tx_sg, 1, count);
> + sg_init_table(&dma->sg, 1);
> + kfifo_dma_out_prepare(&tport->xmit_fifo, &dma->sg, 1, count);
>
> - mapped = dma_map_sg(port->dev, &dma->tx_sg, 1, dma->dir);
> + mapped = dma_map_sg(port->dev, &dma->sg, 1, dma->dir);
> if (!mapped) {
> ret = -EIO;
> goto zero_sg;
> }
>
> - dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->tx_sg, 1,
> + dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->sg, 1,
> DMA_MEM_TO_DEV,
> DMA_PREP_INTERRUPT |
> DMA_PREP_FENCE);
> @@ -550,9 +535,9 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
> dma_async_issue_pending(dma->chan);
> return 0;
> unmap:
> - dma_unmap_sg(port->dev, &dma->tx_sg, 1, dma->dir);
> + dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
> zero_sg:
> - sg_init_table(&dma->tx_sg, 1);
> + sg_init_table(&dma->sg, 1);
> return ret;
> }
>
> @@ -569,7 +554,7 @@ static void msm_complete_rx_dma(void *args)
> uart_port_lock_irqsave(port, &flags);
>
> /* Already stopped */
> - if (!dma->rx.count)
> + if (!sg_dma_len(&dma->sg))
> goto done;
>
> val = msm_read(port, UARTDM_DMEN);
> @@ -586,14 +571,13 @@ static void msm_complete_rx_dma(void *args)
>
> port->icount.rx += count;
>
> - dma->rx.count = 0;
> -
> - dma_unmap_single(port->dev, dma->rx.phys, UARTDM_RX_SIZE, dma->dir);
> + dma_unmap_sg(port->dev, &dma->sg, 1, dma->dir);
> + sg_init_table(&dma->sg, 1);
>
> for (i = 0; i < count; i++) {
> char flag = TTY_NORMAL;
>
> - if (msm_port->break_detected && dma->rx.virt[i] == 0) {
> + if (msm_port->break_detected && dma->virt[i] == 0) {
> port->icount.brk++;
> flag = TTY_BREAK;
> msm_port->break_detected = false;
> @@ -604,9 +588,9 @@ static void msm_complete_rx_dma(void *args)
> if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
> flag = TTY_NORMAL;
>
> - sysrq = uart_prepare_sysrq_char(port, dma->rx.virt[i]);
> + sysrq = uart_prepare_sysrq_char(port, dma->virt[i]);
> if (!sysrq)
> - tty_insert_flip_char(tport, dma->rx.virt[i], flag);
> + tty_insert_flip_char(tport, dma->virt[i], flag);
> }
>
> msm_start_rx_dma(msm_port);
> @@ -630,14 +614,13 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
> if (!dma->chan)
> return;
>
> - dma->rx.phys = dma_map_single(uart->dev, dma->rx.virt,
> - UARTDM_RX_SIZE, dma->dir);
> - ret = dma_mapping_error(uart->dev, dma->rx.phys);
> - if (ret)
> + sg_init_one(&dma->sg, dma->virt, UARTDM_RX_SIZE);
> + ret = dma_map_sg(uart->dev, &dma->sg, 1, dma->dir);
> + if (!ret)
> goto sw_mode;
>
> - dma->desc = dmaengine_prep_slave_single(dma->chan, dma->rx.phys,
> - UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
> + dma->desc = dmaengine_prep_slave_sg(dma->chan, &dma->sg, 1,
> + DMA_DEV_TO_MEM,
> DMA_PREP_INTERRUPT);
> if (!dma->desc)
> goto unmap;
> @@ -664,8 +647,6 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
>
> msm_write(uart, msm_port->imr, MSM_UART_IMR);
>
> - dma->rx.count = UARTDM_RX_SIZE;
> -
> dma_async_issue_pending(dma->chan);
>
> msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
> @@ -684,7 +665,8 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
>
> return;
> unmap:
> - dma_unmap_single(uart->dev, dma->rx.phys, UARTDM_RX_SIZE, dma->dir);
> + dma_unmap_sg(uart->dev, &dma->sg, 1, dma->dir);
> + sg_init_table(&dma->sg, 1);
>
> sw_mode:
> /*
> @@ -967,7 +949,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
> }
>
> if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
> - if (dma->rx.count) {
> + if (sg_dma_len(&dma->sg)) {
> val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
> msm_write(port, val, MSM_UART_CR);
> val = MSM_UART_CR_CMD_RESET_STALE_INT;


2024-04-23 23:35:20

by Greg KH

Subject: Re: [PATCH] serial: msm: Unify TX and RX DMA paths

On Tue, Apr 23, 2024 at 04:08:04PM +0200, [email protected] wrote:
> Hi Marek,
>
> On 23/04/2024 14:08, Marek Szyprowski wrote:
> > Use the scatterlist-based API for the RX path as well, unifying the TX
> > and RX DMA paths and simplifying the driver code a bit.
>
> Thanks for the patch; I have no idea whether this is right or wrong.
>
> Greg, I think we should wait until this change is fully tested on multiple
> platforms, including DMA usage (Bluetooth), before applying it.

Great, who is going to test that without me adding it to linux-next?

thanks,

greg k-h

2024-04-29 07:36:21

by Neil Armstrong

Subject: Re: [PATCH] serial: msm: Unify TX and RX DMA paths

Hi Greg,

On 24/04/2024 01:33, Greg Kroah-Hartman wrote:
> On Tue, Apr 23, 2024 at 04:08:04PM +0200, [email protected] wrote:
>> Hi Marek,
>>
>> On 23/04/2024 14:08, Marek Szyprowski wrote:
>>> Use the scatterlist-based API for the RX path as well, unifying the TX
>>> and RX DMA paths and simplifying the driver code a bit.
>>
>> Thanks for the patch; I have no idea whether this is right or wrong.
>>
>> Greg, I think we should wait until this change is fully tested on multiple
>> platforms, including DMA usage (Bluetooth), before applying it.
>
> Great, who is going to test that without me adding it to linux-next?

Qcom developers, reviewers and maintainers will review and test it like
any other patch affecting the Qualcomm platforms.

Thanks,
Neil

>
> thanks,
>
> greg k-h