1. We need a write lock in mhi_gen_tre(), otherwise there is a race on the
WP used for ringing the channel DB between mhi_queue() and the M0
transition (see the sketch after this list).
2. We cannot invoke local_bh_enable() while IRQs are disabled, so move
read_lock_irqsave() below the mhi_gen_tre() call since we add
write_lock_bh() in mhi_gen_tre().
3. Drop the channel lock around xfer_cb() to prevent a potential lockup.
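To illustrate point 1, a condensed sketch of the locking after this series
(not the actual driver code; bodies are elided and only the WP/lock
interaction is shown):

	/* Queue path after this series: mhi_queue() -> mhi_gen_tre() */
	int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, ...)
	{
		write_lock_bh(&mhi_chan->lock);	/* serialize WP access */
		/* ... fill the TRE and advance tre_ring->wp ... */
		write_unlock_bh(&mhi_chan->lock);
		return 0;
	}

	/*
	 * The M0 transition rings the channel DB (i.e. reads tre_ring->wp)
	 * while holding the same mhi_chan->lock, so it can no longer
	 * observe a half-updated WP while a TRE is being generated.
	 */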
v1 -> v2:
Added write_unlock_bh(&mhi_chan->lock) in mhi_gen_tre() before returning
on the error path.
v2 -> v3:
1. Split protecting the WP and unlocking around xfer_cb() into two patches
2. Add a new patch to stop processing buffers and events of a disabled or
stopped channel.
v3 -> v4:
1. Modify commit message
2. Add an unlock operation before returning an error
v4 -> v5:
1. Squash "protecting WP" and "Take irqsave lock" into one patch
2. Drop patch 3/4 of v4
Bhaumik Bhatt (1):
bus: mhi: host: Add spinlock to protect WP access when queueing TREs
Qiang Yu (1):
bus: mhi: host: Drop chan lock before queuing buffers
drivers/bus/mhi/host/main.c | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
--
2.7.4
Ensure the read and write locks for the channel are not taken in succession
by dropping the read lock from parse_xfer_event() such that a callback
given to the client can potentially queue buffers and acquire the write
lock in that process. Any queueing of buffers should be done without the
channel read lock acquired, as it can result in multiple locks and a soft
lockup.
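With the channel write lock now taken inside mhi_gen_tre(), the problematic
chain without this patch looks roughly as follows (simplified; client_cb()
is only a stand-in for whatever xfer_cb the client registered):

	parse_xfer_event()
	  read_lock_bh(&mhi_chan->lock);
	  mhi_chan->xfer_cb()                 -> client_cb()
	    mhi_queue_buf() / mhi_queue()
	      mhi_gen_tre()
	        write_lock_bh(&mhi_chan->lock);  <- writer spins behind the
	                                           read lock we already hold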
Signed-off-by: Qiang Yu <[email protected]>
---
drivers/bus/mhi/host/main.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 32021fe..25f98d6 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -642,6 +642,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
+ read_unlock_bh(&mhi_chan->lock);
+
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
@@ -667,6 +669,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
kfree(buf_info->cb_buf);
}
}
+
+ read_lock_bh(&mhi_chan->lock);
}
break;
} /* CC_EOT */
--
2.7.4
From: Bhaumik Bhatt <[email protected]>
Protect WP accesses such that multiple threads queueing buffers for
incoming data do not race.
Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
__local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, let's
take the irqsave lock after the TRE is generated, to avoid running
write_unlock_bh() while the irqsave lock is held.
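In other words, the two orderings compare as follows (illustrative sketch
of mhi_queue(), not the full function):

	/* Before (problematic once mhi_gen_tre() takes the channel lock):
	 *
	 *   read_lock_irqsave(&mhi_cntrl->pm_lock, flags);  <- IRQs off
	 *   mhi_gen_tre()
	 *     write_lock_bh(&mhi_chan->lock);
	 *     ...
	 *     write_unlock_bh(&mhi_chan->lock);  <- __local_bh_enable_ip()
	 *                                           runs with IRQs disabled
	 *
	 * After:
	 *
	 *   mhi_gen_tre()                        <- chan lock taken/released
	 *                                           with only BHs disabled
	 *   read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
	 */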
Cc: <[email protected]>
Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
Signed-off-by: Bhaumik Bhatt <[email protected]>
Signed-off-by: Qiang Yu <[email protected]>
---
drivers/bus/mhi/host/main.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index dcf627b..32021fe 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -1122,17 +1122,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
- if (unlikely(ret)) {
- ret = -EAGAIN;
- goto exit_unlock;
- }
+ if (unlikely(ret))
+ return -EAGAIN;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
if (unlikely(ret))
- goto exit_unlock;
+ return ret;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* Packet is queued, take a usage ref to exit M3 if necessary
* for host->device buffer, balanced put is done on buffer completion
@@ -1152,7 +1150,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (dir == DMA_FROM_DEVICE)
mhi_cntrl->runtime_put(mhi_cntrl);
-exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
return ret;
@@ -1204,6 +1201,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int eot, eob, chain, bei;
int ret;
+ /* Protect accesses for reading and incrementing WP */
+ write_lock_bh(&mhi_chan->lock);
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1221,8 +1221,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
+ if (ret) {
+ write_unlock_bh(&mhi_chan->lock);
return ret;
+ }
}
eob = !!(flags & MHI_EOB);
@@ -1239,6 +1241,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+ write_unlock_bh(&mhi_chan->lock);
+
return 0;
}
--
2.7.4
On 12/10/2023 11:42 PM, Qiang Yu wrote:
> From: Bhaumik Bhatt <[email protected]>
>
> Protect WP accesses such that multiple threads queueing buffers for
> incoming data do not race.
>
> Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
> __local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, let's
> take the irqsave lock after the TRE is generated, to avoid running
> write_unlock_bh() while the irqsave lock is held.
>
> Cc: <[email protected]>
> Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
> Signed-off-by: Bhaumik Bhatt <[email protected]>
> Signed-off-by: Qiang Yu <[email protected]>
Seems to work fine for AIC100
Reviewed-by: Jeffrey Hugo <[email protected]>
Tested-by: Jeffrey Hugo <[email protected]>
On 12/10/2023 11:42 PM, Qiang Yu wrote:
> Ensure the read and write locks for the channel are not taken in succession
> by dropping the read lock from parse_xfer_event() such that a callback
> given to the client can potentially queue buffers and acquire the write
> lock in that process. Any queueing of buffers should be done without the
> channel read lock acquired, as it can result in multiple locks and a soft
> lockup.
>
> Signed-off-by: Qiang Yu <[email protected]>
Seems to work fine for AIC100
Reviewed-by: Jeffrey Hugo <[email protected]>
Tested-by: Jeffrey Hugo <[email protected]>
On Mon, Dec 11, 2023 at 02:42:51PM +0800, Qiang Yu wrote:
> From: Bhaumik Bhatt <[email protected]>
>
> Protect WP accesses such that multiple threads queueing buffers for
> incoming data do not race.
>
> Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
> __local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, let's
> take the irqsave lock after the TRE is generated, to avoid running
> write_unlock_bh() while the irqsave lock is held.
>
> Cc: <[email protected]>
> Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
> Signed-off-by: Bhaumik Bhatt <[email protected]>
> Signed-off-by: Qiang Yu <[email protected]>
Reviewed-by: Manivannan Sadhasivam <[email protected]>
- Mani
> ---
> drivers/bus/mhi/host/main.c | 22 +++++++++++++---------
> 1 file changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
> index dcf627b..32021fe 100644
> --- a/drivers/bus/mhi/host/main.c
> +++ b/drivers/bus/mhi/host/main.c
> @@ -1122,17 +1122,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
> if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
> return -EIO;
>
> - read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
> -
> ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
> - if (unlikely(ret)) {
> - ret = -EAGAIN;
> - goto exit_unlock;
> - }
> + if (unlikely(ret))
> + return -EAGAIN;
>
> ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
> if (unlikely(ret))
> - goto exit_unlock;
> + return ret;
> +
> + read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>
> /* Packet is queued, take a usage ref to exit M3 if necessary
> * for host->device buffer, balanced put is done on buffer completion
> @@ -1152,7 +1150,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
> if (dir == DMA_FROM_DEVICE)
> mhi_cntrl->runtime_put(mhi_cntrl);
>
> -exit_unlock:
> read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
>
> return ret;
> @@ -1204,6 +1201,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
> int eot, eob, chain, bei;
> int ret;
>
> + /* Protect accesses for reading and incrementing WP */
> + write_lock_bh(&mhi_chan->lock);
> +
> buf_ring = &mhi_chan->buf_ring;
> tre_ring = &mhi_chan->tre_ring;
>
> @@ -1221,8 +1221,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
>
> if (!info->pre_mapped) {
> ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
> - if (ret)
> + if (ret) {
> + write_unlock_bh(&mhi_chan->lock);
> return ret;
> + }
> }
>
> eob = !!(flags & MHI_EOB);
> @@ -1239,6 +1241,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
> mhi_add_ring_element(mhi_cntrl, tre_ring);
> mhi_add_ring_element(mhi_cntrl, buf_ring);
>
> + write_unlock_bh(&mhi_chan->lock);
> +
> return 0;
> }
>
> --
> 2.7.4
>
>
--
மணிவண்ணன் சதாசிவம்
On Mon, Dec 11, 2023 at 02:42:52PM +0800, Qiang Yu wrote:
> Ensure the read and write locks for the channel are not taken in succession
> by dropping the read lock from parse_xfer_event() such that a callback
> given to the client can potentially queue buffers and acquire the write
> lock in that process. Any queueing of buffers should be done without the
> channel read lock acquired, as it can result in multiple locks and a soft
> lockup.
>
Cc: <[email protected]> # 5.7
Fixes: 1d3173a3bae7 ("bus: mhi: core: Add support for processing events from client device")
> Signed-off-by: Qiang Yu <[email protected]>
Reviewed-by: Manivannan Sadhasivam <[email protected]>
- Mani
> ---
> drivers/bus/mhi/host/main.c | 4 ++++
> 1 file changed, 4 insertions(+)
>
> diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
> index 32021fe..25f98d6 100644
> --- a/drivers/bus/mhi/host/main.c
> +++ b/drivers/bus/mhi/host/main.c
> @@ -642,6 +642,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
> mhi_del_ring_element(mhi_cntrl, tre_ring);
> local_rp = tre_ring->rp;
>
> + read_unlock_bh(&mhi_chan->lock);
> +
> /* notify client */
> mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
>
> @@ -667,6 +669,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
> kfree(buf_info->cb_buf);
> }
> }
> +
> + read_lock_bh(&mhi_chan->lock);
> }
> break;
> } /* CC_EOT */
> --
> 2.7.4
>
>
--
மணிவண்ணன் சதாசிவம்
On Mon, Dec 11, 2023 at 02:42:50PM +0800, Qiang Yu wrote:
>
> 1. We need a write lock in mhi_gen_tre(), otherwise there is a race on the
> WP used for ringing the channel DB between mhi_queue() and the M0
> transition.
> 2. We cannot invoke local_bh_enable() while IRQs are disabled, so move
> read_lock_irqsave() below the mhi_gen_tre() call since we add
> write_lock_bh() in mhi_gen_tre().
> 3. Drop the channel lock around xfer_cb() to prevent a potential lockup.
>
Applied to mhi-next!
- Mani
> v1 -> v2:
> Added write_unlock_bh(&mhi_chan->lock) in mhi_gen_tre() before returning
> on the error path.
>
> v2 -> v3:
> 1. Split protecting the WP and unlocking around xfer_cb() into two patches
> 2. Add a new patch to stop processing buffers and events of a disabled or
> stopped channel.
>
> v3 -> v4:
> 1. Modify commit message
> 2. Add an unlock operation before returning an error
>
> v4 -> v5:
> 1. Squash "protecting WP" and "Take irqsave lock" into one patch
> 2. Drop patch 3/4 of v4
>
> Bhaumik Bhatt (1):
> bus: mhi: host: Add spinlock to protect WP access when queueing TREs
>
> Qiang Yu (1):
> bus: mhi: host: Drop chan lock before queuing buffers
>
> drivers/bus/mhi/host/main.c | 26 +++++++++++++++++---------
> 1 file changed, 17 insertions(+), 9 deletions(-)
>
> --
> 2.7.4
>
>
--
மணிவண்ணன் சதாசிவம்