During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
before registering the driver", Stefan pointed out some possible issues
in the .probe() and .remove() callbacks of the virtio-vsock driver.
This series tries to solve these issues:
- Patch 1 adds RCU critical sections to avoid use-after-free of
the 'the_virtio_vsock' pointer (the pattern is sketched below).
- Patch 2 stops the workers before calling vdev->config->reset(vdev),
to be sure that no one is accessing the device.
- Patch 3 moves the flush of works to the end of .remove() to avoid
use-after-free of the 'vsock' object.
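For reference, the RCU conversion in patch 1 boils down to the
following pattern (a simplified sketch, not the actual diff):

    static struct virtio_vsock __rcu *the_virtio_vsock;

    /* Reader side (e.g. the send path): enter an RCU read-side
     * critical section and re-check the pointer before each use.
     */
    rcu_read_lock();
    vsock = rcu_dereference(the_virtio_vsock);
    if (!vsock) {
            rcu_read_unlock();
            return -ENODEV;
    }
    /* ... use vsock ... */
    rcu_read_unlock();

    /* Writer side (.remove()): unpublish the pointer, then wait
     * for all readers to leave their critical sections before
     * tearing anything down.
     */
    rcu_assign_pointer(the_virtio_vsock, NULL);
    synchronize_rcu();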
v2:
- Patch 1: use RCU to protect 'the_virtio_vsock' pointer
- Patch 2: no changes
- Patch 3: flush works only at the end of .remove()
- Removed patch 4 because virtqueue_detach_unused_buf() already returns
all the allocated buffers (see the sketch below).
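For context, the drain that .remove() already performs reclaims every
buffer, roughly like this (a simplified sketch of the existing code,
shown here for the RX queue):

    struct virtio_vsock_pkt *pkt;

    /* virtqueue_detach_unused_buf() hands back every buffer the
     * device has not consumed, so nothing is leaked here.
     */
    mutex_lock(&vsock->rx_lock);
    while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
            virtio_transport_free_pkt(pkt);
    mutex_unlock(&vsock->rx_lock);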
v1: https://patchwork.kernel.org/cover/10964733/
Stefano Garzarella (3):
vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
vsock/virtio: stop workers during the .remove()
vsock/virtio: fix flush of works during the .remove()
net/vmw_vsock/virtio_transport.c | 131 ++++++++++++++++++++++++-------
1 file changed, 102 insertions(+), 29 deletions(-)
--
2.20.1
Before calling vdev->config->reset(vdev) we need to be sure that
no one is accessing the device. For this reason, we add new variables
to struct virtio_vsock to stop the workers during .remove().
This patch also adds a few comments before vdev->config->reset(vdev)
and vdev->config->del_vqs(vdev).
Suggested-by: Stefan Hajnoczi <[email protected]>
Suggested-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Stefano Garzarella <[email protected]>
---
net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++-
1 file changed, 50 insertions(+), 1 deletion(-)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 7ad510ec12e0..1b44ec6f3f6c 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -38,6 +38,7 @@ struct virtio_vsock {
* must be accessed with tx_lock held.
*/
struct mutex tx_lock;
+ bool tx_run;
struct work_struct send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -53,6 +54,7 @@ struct virtio_vsock {
* must be accessed with rx_lock held.
*/
struct mutex rx_lock;
+ bool rx_run;
int rx_buf_nr;
int rx_buf_max_nr;
@@ -60,6 +62,7 @@ struct virtio_vsock {
* vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
*/
struct mutex event_lock;
+ bool event_run;
struct virtio_vsock_event event_list[8];
u32 guest_cid;
@@ -94,6 +97,10 @@ static void virtio_transport_loopback_work(struct work_struct *work)
spin_unlock_bh(&vsock->loopback_list_lock);
mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
@@ -102,6 +109,7 @@ static void virtio_transport_loopback_work(struct work_struct *work)
virtio_transport_recv_pkt(pkt);
}
+out:
mutex_unlock(&vsock->rx_lock);
}
@@ -130,6 +138,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
mutex_lock(&vsock->tx_lock);
+ if (!vsock->tx_run)
+ goto out;
+
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
@@ -188,6 +199,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
if (added)
virtqueue_kick(vq);
+out:
mutex_unlock(&vsock->tx_lock);
if (restart_rx)
@@ -323,6 +335,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
mutex_lock(&vsock->tx_lock);
+
+ if (!vsock->tx_run)
+ goto out;
+
do {
struct virtio_vsock_pkt *pkt;
unsigned int len;
@@ -333,6 +349,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
added = true;
}
} while (!virtqueue_enable_cb(vq));
+
+out:
mutex_unlock(&vsock->tx_lock);
if (added)
@@ -361,6 +379,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
mutex_lock(&vsock->rx_lock);
+ if (!vsock->rx_run)
+ goto out;
+
do {
virtqueue_disable_cb(vq);
for (;;) {
@@ -470,6 +491,9 @@ static void virtio_transport_event_work(struct work_struct *work)
mutex_lock(&vsock->event_lock);
+ if (!vsock->event_run)
+ goto out;
+
do {
struct virtio_vsock_event *event;
unsigned int len;
@@ -484,7 +508,7 @@ static void virtio_transport_event_work(struct work_struct *work)
} while (!virtqueue_enable_cb(vq));
virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
-
+out:
mutex_unlock(&vsock->event_lock);
}
@@ -619,12 +643,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
+ mutex_lock(&vsock->tx_lock);
+ vsock->tx_run = true;
+ mutex_unlock(&vsock->tx_lock);
+
mutex_lock(&vsock->rx_lock);
virtio_vsock_rx_fill(vsock);
+ vsock->rx_run = true;
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->event_lock);
virtio_vsock_event_fill(vsock);
+ vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
vdev->priv = vsock;
@@ -659,6 +689,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Reset all connected sockets when the device disappears */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+ /* Stop all work handlers to make sure no one is accessing the device,
+ * so we can safely call vdev->config->reset().
+ */
+ mutex_lock(&vsock->rx_lock);
+ vsock->rx_run = false;
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->tx_lock);
+ vsock->tx_run = false;
+ mutex_unlock(&vsock->tx_lock);
+
+ mutex_lock(&vsock->event_lock);
+ vsock->event_run = false;
+ mutex_unlock(&vsock->event_lock);
+
+ /* Flush all device writes and interrupts, the device will not use
+ * any more buffers.
+ */
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -689,6 +737,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
}
spin_unlock_bh(&vsock->loopback_list_lock);
+ /* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
mutex_unlock(&the_virtio_vsock_mutex);
--
2.20.1
This patch moves the flush of works to after vdev->config->del_vqs(vdev),
because we need to be sure that no workers are running before freeing
the 'vsock' object.
Since we stopped the workers using the [tx|rx|event]_run flags,
we are sure no one is accessing the device while we are calling
vdev->config->reset(vdev), so we can safely move the workers' flush.
Until vdev->config->del_vqs(vdev) returns, workers can still be
scheduled by VQ callbacks, so we must flush them after del_vqs() to
avoid a use-after-free of the 'vsock' object.
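With the whole series applied, the teardown order in .remove() looks
roughly like this (a condensed sketch; elided steps are marked):

    static void virtio_vsock_remove(struct virtio_device *vdev)
    {
            struct virtio_vsock *vsock = vdev->priv;

            mutex_lock(&the_virtio_vsock_mutex);
            rcu_assign_pointer(the_virtio_vsock, NULL);
            synchronize_rcu();              /* patch 1: no new users */

            vsock_for_each_connected_socket(virtio_vsock_reset_sock);

            /* patch 2: clear tx_run/rx_run/event_run under their locks */

            vdev->config->reset(vdev);      /* device stops using buffers */

            /* ... detach unused buffers, purge the pending packet lists ... */

            vdev->config->del_vqs(vdev);    /* no further VQ callbacks */

            /* patch 3: flush only now, nothing can requeue the works */
            flush_work(&vsock->loopback_work);
            flush_work(&vsock->rx_work);
            flush_work(&vsock->tx_work);
            flush_work(&vsock->event_work);
            flush_work(&vsock->send_pkt_work);

            mutex_unlock(&the_virtio_vsock_mutex);
            kfree(vsock);
    }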
Suggested-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Stefano Garzarella <[email protected]>
---
net/vmw_vsock/virtio_transport.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 1b44ec6f3f6c..96dafa978268 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -680,12 +680,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
- flush_work(&vsock->loopback_work);
- flush_work(&vsock->rx_work);
- flush_work(&vsock->tx_work);
- flush_work(&vsock->event_work);
- flush_work(&vsock->send_pkt_work);
-
/* Reset all connected sockets when the device disappears */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
@@ -740,6 +734,15 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
+ /* Other works can be queued before 'config->del_vqs()', so we flush
+ * all works before freeing the vsock object to avoid use after free.
+ */
+ flush_work(&vsock->loopback_work);
+ flush_work(&vsock->rx_work);
+ flush_work(&vsock->tx_work);
+ flush_work(&vsock->event_work);
+ flush_work(&vsock->send_pkt_work);
+
mutex_unlock(&the_virtio_vsock_mutex);
kfree(vsock);
--
2.20.1
On Fri, Jun 28, 2019 at 02:36:59PM +0200, Stefano Garzarella wrote:
> This patch moves the flush of works to after vdev->config->del_vqs(vdev),
> because we need to be sure that no workers are running before freeing
> the 'vsock' object.
>
> Since we stopped the workers using the [tx|rx|event]_run flags,
> we are sure no one is accessing the device while we are calling
> vdev->config->reset(vdev), so we can safely move the workers' flush.
What about the send_pkt and loopback works? How were they stopped safely?
For example, if the send_pkt work executes, then we're in trouble, since
it accesses the tx virtqueue, which is deleted by ->del_vqs().
On Fri, Jun 28, 2019 at 02:36:59PM +0200, Stefano Garzarella wrote:
> This patch moves the flush of works to after vdev->config->del_vqs(vdev),
> because we need to be sure that no workers are running before freeing
> the 'vsock' object.
>
> Since we stopped the workers using the [tx|rx|event]_run flags,
> we are sure no one is accessing the device while we are calling
> vdev->config->reset(vdev), so we can safely move the workers' flush.
>
> Until vdev->config->del_vqs(vdev) returns, workers can still be
> scheduled by VQ callbacks, so we must flush them after del_vqs() to
> avoid a use-after-free of the 'vsock' object.
Never mind, I looked back at patch 2 and saw that the send_pkt and
loopback work functions were also updated. Thanks!
Stefan
On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> before registering the driver", Stefan pointed out some possible issues
> in the .probe() and .remove() callbacks of the virtio-vsock driver.
>
> This series tries to solve these issues:
> - Patch 1 adds RCU critical sections to avoid use-after-free of
> the 'the_virtio_vsock' pointer (the pattern is sketched below).
> - Patch 2 stops the workers before calling vdev->config->reset(vdev),
> to be sure that no one is accessing the device.
> - Patch 3 moves the flush of works to the end of .remove() to avoid
> use-after-free of the 'vsock' object.
>
> v2:
> - Patch 1: use RCU to protect 'the_virtio_vsock' pointer
> - Patch 2: no changes
> - Patch 3: flush works only at the end of .remove()
> - Removed patch 4 because virtqueue_detach_unused_buf() already returns
> all the allocated buffers (see the sketch below).
>
> v1: https://patchwork.kernel.org/cover/10964733/
This looks good to me.
Did you run any stress tests? For example, an SMP guest constantly
connecting and sending packets, together with a script that
hotplugs/unplugs vhost-vsock-pci from the host side.
Stefan
On Mon, Jul 01, 2019 at 04:11:13PM +0100, Stefan Hajnoczi wrote:
> On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> > During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> > before registering the driver", Stefan pointed out some possible issues
> > in the .probe() and .remove() callbacks of the virtio-vsock driver.
> >
> > This series tries to solve these issues:
> > - Patch 1 adds RCU critical sections to avoid use-after-free of
> > the 'the_virtio_vsock' pointer (the pattern is sketched below).
> > - Patch 2 stops the workers before calling vdev->config->reset(vdev),
> > to be sure that no one is accessing the device.
> > - Patch 3 moves the flush of works to the end of .remove() to avoid
> > use-after-free of the 'vsock' object.
> >
> > v2:
> > - Patch 1: use RCU to protect 'the_virtio_vsock' pointer
> > - Patch 2: no changes
> > - Patch 3: flush works only at the end of .remove()
> > - Removed patch 4 because virtqueue_detach_unused_buf() already returns
> > all the allocated buffers (see the sketch below).
> >
> > v1: https://patchwork.kernel.org/cover/10964733/
>
> This looks good to me.
Thanks for the review!
>
> Did you run any stress tests? For example, an SMP guest constantly
> connecting and sending packets, together with a script that
> hotplugs/unplugs vhost-vsock-pci from the host side.
Yes, I started an SMP guest (-smp 4 -monitor tcp:127.0.0.1:1234,server,nowait)
and I ran these scripts to stress the .probe()/.remove() path:
- guest
while true; do
cat /dev/urandom | nc-vsock -l 4321 > /dev/null &
cat /dev/urandom | nc-vsock -l 5321 > /dev/null &
cat /dev/urandom | nc-vsock -l 6321 > /dev/null &
cat /dev/urandom | nc-vsock -l 7321 > /dev/null &
wait
done
- host
while true; do
cat /dev/urandom | nc-vsock 3 4321 > /dev/null &
cat /dev/urandom | nc-vsock 3 5321 > /dev/null &
cat /dev/urandom | nc-vsock 3 6321 > /dev/null &
cat /dev/urandom | nc-vsock 3 7321 > /dev/null &
sleep 2
echo "device_del v1" | nc 127.0.0.1 1234
sleep 1
echo "device_add vhost-vsock-pci,id=v1,guest-cid=3" | nc 127.0.0.1 1234
sleep 1
done
Do you think this is enough, or would a more accurate test be better?
Thanks,
Stefano
On Mon, Jul 01, 2019 at 07:03:57PM +0200, Stefano Garzarella wrote:
> On Mon, Jul 01, 2019 at 04:11:13PM +0100, Stefan Hajnoczi wrote:
> > On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> > > During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> > > before registering the driver", Stefan pointed out some possible issues
> > > in the .probe() and .remove() callbacks of the virtio-vsock driver.
> > >
> > > This series tries to solve these issues:
> > > - Patch 1 adds RCU critical sections to avoid use-after-free of
> > > the 'the_virtio_vsock' pointer (the pattern is sketched below).
> > > - Patch 2 stops the workers before calling vdev->config->reset(vdev),
> > > to be sure that no one is accessing the device.
> > > - Patch 3 moves the flush of works to the end of .remove() to avoid
> > > use-after-free of the 'vsock' object.
> > >
> > > v2:
> > > - Patch 1: use RCU to protect 'the_virtio_vsock' pointer
> > > - Patch 2: no changes
> > > - Patch 3: flush works only at the end of .remove()
> > > - Removed patch 4 because virtqueue_detach_unused_buf() already returns
> > > all the allocated buffers (see the sketch below).
> > >
> > > v1: https://patchwork.kernel.org/cover/10964733/
> >
> > This looks good to me.
>
> Thanks for the review!
>
> >
> > Did you run any stress tests? For example, an SMP guest constantly
> > connecting and sending packets, together with a script that
> > hotplugs/unplugs vhost-vsock-pci from the host side.
>
> Yes, I started an SMP guest (-smp 4 -monitor tcp:127.0.0.1:1234,server,nowait)
> and I ran these scripts to stress the .probe()/.remove() path:
>
> - guest
> while true; do
> cat /dev/urandom | nc-vsock -l 4321 > /dev/null &
> cat /dev/urandom | nc-vsock -l 5321 > /dev/null &
> cat /dev/urandom | nc-vsock -l 6321 > /dev/null &
> cat /dev/urandom | nc-vsock -l 7321 > /dev/null &
> wait
> done
>
> - host
> while true; do
> cat /dev/urandom | nc-vsock 3 4321 > /dev/null &
> cat /dev/urandom | nc-vsock 3 5321 > /dev/null &
> cat /dev/urandom | nc-vsock 3 6321 > /dev/null &
> cat /dev/urandom | nc-vsock 3 7321 > /dev/null &
> sleep 2
> echo "device_del v1" | nc 127.0.0.1 1234
> sleep 1
> echo "device_add vhost-vsock-pci,id=v1,guest-cid=3" | nc 127.0.0.1 1234
> sleep 1
> done
>
> Do you think this is enough, or would a more accurate test be better?
That's good when left running overnight so that thousands of hotplug
events are tested.
Stefan
On Wed, Jul 03, 2019 at 10:14:53AM +0100, Stefan Hajnoczi wrote:
> On Mon, Jul 01, 2019 at 07:03:57PM +0200, Stefano Garzarella wrote:
> > On Mon, Jul 01, 2019 at 04:11:13PM +0100, Stefan Hajnoczi wrote:
> > > On Fri, Jun 28, 2019 at 02:36:56PM +0200, Stefano Garzarella wrote:
> > > > During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
> > > > before registering the driver", Stefan pointed out some possible issues
> > > > in the .probe() and .remove() callbacks of the virtio-vsock driver.
> > > >
> > > > This series tries to solve these issues:
> > > > - Patch 1 adds RCU critical sections to avoid use-after-free of
> > > > the 'the_virtio_vsock' pointer (the pattern is sketched below).
> > > > - Patch 2 stops the workers before calling vdev->config->reset(vdev),
> > > > to be sure that no one is accessing the device.
> > > > - Patch 3 moves the flush of works to the end of .remove() to avoid
> > > > use-after-free of the 'vsock' object.
> > > >
> > > > v2:
> > > > - Patch 1: use RCU to protect 'the_virtio_vsock' pointer
> > > > - Patch 2: no changes
> > > > - Patch 3: flush works only at the end of .remove()
> > > > - Removed patch 4 because virtqueue_detach_unused_buf() already returns
> > > > all the allocated buffers (see the sketch below).
> > > >
> > > > v1: https://patchwork.kernel.org/cover/10964733/
> > >
> > > This looks good to me.
> >
> > Thanks for the review!
> >
> > >
> > > Did you run any stress tests? For example, an SMP guest constantly
> > > connecting and sending packets, together with a script that
> > > hotplugs/unplugs vhost-vsock-pci from the host side.
> >
> > Yes, I started an SMP guest (-smp 4 -monitor tcp:127.0.0.1:1234,server,nowait)
> > and I ran these scripts to stress the .probe()/.remove() path:
> >
> > - guest
> > while true; do
> > cat /dev/urandom | nc-vsock -l 4321 > /dev/null &
> > cat /dev/urandom | nc-vsock -l 5321 > /dev/null &
> > cat /dev/urandom | nc-vsock -l 6321 > /dev/null &
> > cat /dev/urandom | nc-vsock -l 7321 > /dev/null &
> > wait
> > done
> >
> > - host
> > while true; do
> > cat /dev/urandom | nc-vsock 3 4321 > /dev/null &
> > cat /dev/urandom | nc-vsock 3 5321 > /dev/null &
> > cat /dev/urandom | nc-vsock 3 6321 > /dev/null &
> > cat /dev/urandom | nc-vsock 3 7321 > /dev/null &
> > sleep 2
> > echo "device_del v1" | nc 127.0.0.1 1234
> > sleep 1
> > echo "device_add vhost-vsock-pci,id=v1,guest-cid=3" | nc 127.0.0.1 1234
> > sleep 1
> > done
> >
> > Do you think this is enough, or would a more accurate test be better?
>
> That's good when left running overnight so that thousands of hotplug
> events are tested.
Honestly, I ran the test for ~30 mins (because without the patch the
crash happens within a few seconds), but of course I'll run it overnight :)
Thanks,
Stefano
On 2019/6/28 8:36 PM, Stefano Garzarella wrote:
> Before calling vdev->config->reset(vdev) we need to be sure that
> no one is accessing the device. For this reason, we add new variables
> to struct virtio_vsock to stop the workers during .remove().
>
> This patch also adds a few comments before vdev->config->reset(vdev)
> and vdev->config->del_vqs(vdev).
>
> Suggested-by: Stefan Hajnoczi <[email protected]>
> Suggested-by: Michael S. Tsirkin <[email protected]>
> Signed-off-by: Stefano Garzarella <[email protected]>
> ---
> net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++-
> 1 file changed, 50 insertions(+), 1 deletion(-)
This should work. But we may consider converting the_virtio_vsock to a
socket object and using the socket refcnt and destructor in the future,
instead of inventing something new ourselves.
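Something like this (an untested kref-based sketch, just to illustrate
the idea; the real conversion would reuse the sock refcnt and its
destructor, and the names here are only illustrative):

    #include <linux/kref.h>

    struct virtio_vsock {
            struct kref ref;
            /* ... */
    };

    static void virtio_vsock_release(struct kref *ref)
    {
            struct virtio_vsock *vsock =
                    container_of(ref, struct virtio_vsock, ref);

            kfree(vsock);
    }

    /* Each worker pins the object instead of checking a run flag: */
    if (!kref_get_unless_zero(&vsock->ref))
            return;
    /* ... do the work ... */
    kref_put(&vsock->ref, virtio_vsock_release);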
Thanks
>
> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
> index 7ad510ec12e0..1b44ec6f3f6c 100644
> --- a/net/vmw_vsock/virtio_transport.c
> +++ b/net/vmw_vsock/virtio_transport.c
> @@ -38,6 +38,7 @@ struct virtio_vsock {
> * must be accessed with tx_lock held.
> */
> struct mutex tx_lock;
> + bool tx_run;
>
> struct work_struct send_pkt_work;
> spinlock_t send_pkt_list_lock;
> @@ -53,6 +54,7 @@ struct virtio_vsock {
> * must be accessed with rx_lock held.
> */
> struct mutex rx_lock;
> + bool rx_run;
> int rx_buf_nr;
> int rx_buf_max_nr;
>
> @@ -60,6 +62,7 @@ struct virtio_vsock {
> * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
> */
> struct mutex event_lock;
> + bool event_run;
> struct virtio_vsock_event event_list[8];
>
> u32 guest_cid;
> @@ -94,6 +97,10 @@ static void virtio_transport_loopback_work(struct work_struct *work)
> spin_unlock_bh(&vsock->loopback_list_lock);
>
> mutex_lock(&vsock->rx_lock);
> +
> + if (!vsock->rx_run)
> + goto out;
> +
> while (!list_empty(&pkts)) {
> struct virtio_vsock_pkt *pkt;
>
> @@ -102,6 +109,7 @@ static void virtio_transport_loopback_work(struct work_struct *work)
>
> virtio_transport_recv_pkt(pkt);
> }
> +out:
> mutex_unlock(&vsock->rx_lock);
> }
>
> @@ -130,6 +138,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
>
> mutex_lock(&vsock->tx_lock);
>
> + if (!vsock->tx_run)
> + goto out;
> +
> vq = vsock->vqs[VSOCK_VQ_TX];
>
> for (;;) {
> @@ -188,6 +199,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
> if (added)
> virtqueue_kick(vq);
>
> +out:
> mutex_unlock(&vsock->tx_lock);
>
> if (restart_rx)
> @@ -323,6 +335,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
>
> vq = vsock->vqs[VSOCK_VQ_TX];
> mutex_lock(&vsock->tx_lock);
> +
> + if (!vsock->tx_run)
> + goto out;
> +
> do {
> struct virtio_vsock_pkt *pkt;
> unsigned int len;
> @@ -333,6 +349,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
> added = true;
> }
> } while (!virtqueue_enable_cb(vq));
> +
> +out:
> mutex_unlock(&vsock->tx_lock);
>
> if (added)
> @@ -361,6 +379,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
>
> mutex_lock(&vsock->rx_lock);
>
> + if (!vsock->rx_run)
> + goto out;
> +
> do {
> virtqueue_disable_cb(vq);
> for (;;) {
> @@ -470,6 +491,9 @@ static void virtio_transport_event_work(struct work_struct *work)
>
> mutex_lock(&vsock->event_lock);
>
> + if (!vsock->event_run)
> + goto out;
> +
> do {
> struct virtio_vsock_event *event;
> unsigned int len;
> @@ -484,7 +508,7 @@ static void virtio_transport_event_work(struct work_struct *work)
> } while (!virtqueue_enable_cb(vq));
>
> virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
> -
> +out:
> mutex_unlock(&vsock->event_lock);
> }
>
> @@ -619,12 +643,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
> INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
> INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
>
> + mutex_lock(&vsock->tx_lock);
> + vsock->tx_run = true;
> + mutex_unlock(&vsock->tx_lock);
> +
> mutex_lock(&vsock->rx_lock);
> virtio_vsock_rx_fill(vsock);
> + vsock->rx_run = true;
> mutex_unlock(&vsock->rx_lock);
>
> mutex_lock(&vsock->event_lock);
> virtio_vsock_event_fill(vsock);
> + vsock->event_run = true;
> mutex_unlock(&vsock->event_lock);
>
> vdev->priv = vsock;
> @@ -659,6 +689,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
> /* Reset all connected sockets when the device disappears */
> vsock_for_each_connected_socket(virtio_vsock_reset_sock);
>
> + /* Stop all work handlers to make sure no one is accessing the device,
> + * so we can safely call vdev->config->reset().
> + */
> + mutex_lock(&vsock->rx_lock);
> + vsock->rx_run = false;
> + mutex_unlock(&vsock->rx_lock);
> +
> + mutex_lock(&vsock->tx_lock);
> + vsock->tx_run = false;
> + mutex_unlock(&vsock->tx_lock);
> +
> + mutex_lock(&vsock->event_lock);
> + vsock->event_run = false;
> + mutex_unlock(&vsock->event_lock);
> +
> + /* Flush all device writes and interrupts, the device will not use
> + * any more buffers.
> + */
> vdev->config->reset(vdev);
>
> mutex_lock(&vsock->rx_lock);
> @@ -689,6 +737,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
> }
> spin_unlock_bh(&vsock->loopback_list_lock);
>
> + /* Delete virtqueues and flush outstanding callbacks if any */
> vdev->config->del_vqs(vdev);
>
> mutex_unlock(&the_virtio_vsock_mutex);