Hi all:
The code used to busy poll for the cvq command, which turns out to have
several side effects:
1) infinite polling for buggy devices
2) bad interaction with the scheduler
So this series switches to sleep + timeout instead of busy polling.
Please review.
Thanks
Jason Wang (4):
virtio-net: convert rx mode setting to use workqueue
virtio_ring: switch to use BAD_RING()
virtio_ring: introduce a per virtqueue waitqueue
virtio-net: sleep instead of busy waiting for cvq command
drivers/net/virtio_net.c | 79 +++++++++++++++++++++++++++++++-----
drivers/virtio/virtio_ring.c | 33 ++++++++++++++-
include/linux/virtio.h | 4 ++
3 files changed, 105 insertions(+), 11 deletions(-)
--
2.25.1
This patch introduces a per-virtqueue waitqueue to allow the driver to
sleep while waiting for more used buffers. Two new helpers are
introduced so the driver can sleep on a virtqueue and be woken up.
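A minimal usage sketch (illustrative only, not part of the diff below:
apart from virtqueue_wait_for_used() and virtqueue_wake_up(), the
callback and submission helper names are made up, and error handling is
trimmed; the cvq patch later in this series follows the same pattern):

static void my_vq_callback(struct virtqueue *vq)
{
	/* Just wake the waiter; the sleeper consumes the used buffer. */
	virtqueue_wake_up(vq);
}

static int my_send_and_wait(struct virtqueue *vq, struct scatterlist *sg)
{
	unsigned int len;
	int err;

	err = virtqueue_add_outbuf(vq, sg, 1, sg, GFP_ATOMIC);
	if (err)
		return err;

	if (unlikely(!virtqueue_kick(vq)))
		return -EIO;

	/*
	 * Sleeps until the buffer is marked used, a signal arrives or the
	 * timeout expires: >0 on success, 0 on timeout, -ERESTARTSYS when
	 * interrupted.
	 */
	return virtqueue_wait_for_used(vq, &len);
}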
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_ring.c | 31 +++++++++++++++++++++++++++++++
include/linux/virtio.h | 4 ++++
2 files changed, 35 insertions(+)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 90c2034a77f3..4a2d5ac30b0f 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <xen/xen.h>
#ifdef DEBUG
@@ -59,6 +60,7 @@
dev_err(&_vq->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
(_vq)->broken = true; \
+ wake_up_interruptible(&(_vq)->wq); \
} while (0)
#define START_USE(vq)
#define END_USE(vq)
@@ -202,6 +204,9 @@ struct vring_virtqueue {
/* DMA, allocation, and size information */
bool we_own_ring;
+ /* Wait for buffer to be used */
+ wait_queue_head_t wq;
+
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -2023,6 +2028,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
+ init_waitqueue_head(&vq->wq);
+
err = vring_alloc_state_extra_packed(&vring_packed);
if (err)
goto err_state_extra;
@@ -2516,6 +2523,8 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
+ init_waitqueue_head(&vq->wq);
+
err = vring_alloc_state_extra_split(vring_split);
if (err) {
kfree(vq);
@@ -2653,6 +2662,8 @@ static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ wake_up_interruptible(&vq->wq);
+
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2863,4 +2874,24 @@ const struct vring *virtqueue_get_vring(struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+int virtqueue_wait_for_used(struct virtqueue *_vq,
+ unsigned int *len)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Use a better timeout or simply start from no timeout */
+ return wait_event_interruptible_timeout(vq->wq,
+ virtqueue_get_buf(_vq, len),
+ HZ);
+}
+EXPORT_SYMBOL_GPL(virtqueue_wait_for_used);
+
+void virtqueue_wake_up(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ wake_up_interruptible(&vq->wq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_wake_up);
+
MODULE_LICENSE("GPL");
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index dcab9c7e8784..4df098b8f1ca 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -72,6 +72,10 @@ void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
void **ctx);
+int virtqueue_wait_for_used(struct virtqueue *vq,
+ unsigned int *len);
+void virtqueue_wake_up(struct virtqueue *vq);
+
void virtqueue_disable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb(struct virtqueue *vq);
--
2.25.1
We used to busy wait on the cvq command; this tends to be
problematic since:
1) the CPU could wait forever on a buggy/malicious device
2) there's no way to terminate the process that triggers the cvq
command
So this patch switches to sleeping with a timeout (1s) instead of busy
polling for the cvq command forever. This gives the scheduler a breather
and lets the process respond to a signal.
Signed-off-by: Jason Wang <[email protected]>
---
drivers/net/virtio_net.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8225496ccb1e..69173049371f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -405,6 +405,7 @@ static void disable_rx_mode_work(struct virtnet_info *vi)
vi->rx_mode_work_enabled = false;
spin_unlock_bh(&vi->rx_mode_lock);
+ virtqueue_wake_up(vi->cvq);
flush_work(&vi->rx_mode_work);
}
@@ -1497,6 +1498,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
return !oom;
}
+static void virtnet_cvq_done(struct virtqueue *cvq)
+{
+ virtqueue_wake_up(cvq);
+}
+
static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
@@ -2024,12 +2030,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl->status == VIRTIO_NET_OK;
- /* Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq))
- cpu_relax();
+ virtqueue_wait_for_used(vi->cvq, &tmp);
return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -3524,7 +3525,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
- callbacks[total_vqs - 1] = NULL;
+ callbacks[total_vqs - 1] = virtnet_cvq_done;
names[total_vqs - 1] = "control";
}
--
2.25.1
This patch converts rx mode setting to be done in a workqueue. This is
required in order to sleep while waiting for the cvq command to
complete, since the current code is executed under the addr spin lock.
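A condensed sketch of the resulting flow, using the function and field
names from the diff below (the body of the work handler is elided):

/* ndo_set_rx_mode runs in atomic context, so only defer the work */
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	spin_lock(&vi->rx_mode_lock);
	if (vi->rx_mode_work_enabled)
		schedule_work(&vi->rx_mode_work);
	spin_unlock(&vi->rx_mode_lock);
}

/* process context: can take rtnl_lock and sleep on cvq commands */
static void virtnet_rx_mode_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, rx_mode_work);
	...
}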
Signed-off-by: Jason Wang <[email protected]>
---
drivers/net/virtio_net.c | 64 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 61 insertions(+), 3 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 86e52454b5b5..8225496ccb1e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -260,6 +260,15 @@ struct virtnet_info {
/* Work struct for config space updates */
struct work_struct config_work;
+ /* Work struct for config rx mode */
+ struct work_struct rx_mode_work;
+
+ /* Is rx_mode_work enabled? */
+ bool rx_mode_work_enabled;
+
+ /* The lock to synchronize the access to rx_mode_work_enabled */
+ spinlock_t rx_mode_lock;
+
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
@@ -383,6 +392,22 @@ static void disable_delayed_refill(struct virtnet_info *vi)
spin_unlock_bh(&vi->refill_lock);
}
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->rx_mode_lock);
+ vi->rx_mode_work_enabled = true;
+ spin_unlock_bh(&vi->rx_mode_lock);
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->rx_mode_lock);
+ vi->rx_mode_work_enabled = false;
+ spin_unlock_bh(&vi->rx_mode_lock);
+
+ flush_work(&vi->rx_mode_work);
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -2160,9 +2185,11 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
{
- struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, rx_mode_work);
+ struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
@@ -2175,8 +2202,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
+ rtnl_lock();
+
+ netif_addr_lock_bh(dev);
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ netif_addr_unlock_bh(dev);
sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
@@ -2192,14 +2223,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
vi->ctrl->allmulti ? "en" : "dis");
+ netif_addr_lock_bh(dev);
+
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf)
+ if (!buf) {
+ netif_addr_unlock_bh(dev);
+ rtnl_unlock();
return;
+ }
sg_init_table(sg, 2);
@@ -2220,6 +2256,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
+ netif_addr_unlock_bh(dev);
+
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -2227,9 +2265,21 @@ static void virtnet_set_rx_mode(struct net_device *dev)
VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
+ rtnl_unlock();
+
kfree(buf);
}
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ spin_lock(&vi->rx_mode_lock);
+ if (vi->rx_mode_work_enabled)
+ schedule_work(&vi->rx_mode_work);
+ spin_unlock(&vi->rx_mode_lock);
+}
+
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
@@ -3000,6 +3050,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
@@ -3022,6 +3073,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
enable_delayed_refill(vi);
+ enable_rx_mode_work(vi);
+ virtnet_set_rx_mode(vi->dev);
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
@@ -3799,7 +3852,9 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
spin_lock_init(&vi->refill_lock);
+ spin_lock_init(&vi->rx_mode_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
@@ -3905,6 +3960,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ enable_rx_mode_work(vi);
+
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -3984,6 +4041,7 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device. */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
unregister_netdev(vi->dev);
--
2.25.1
On Thu, Dec 22, 2022 at 7:05 AM Jason Wang <[email protected]> wrote:
>
> We used to busy waiting on the cvq command this tends to be
> problematic since:
>
> 1) CPU could wait for ever on a buggy/malicous device
> 2) There's no wait to terminate the process that triggers the cvq
> command
>
> So this patch switch to use sleep with a timeout (1s) instead of busy
> polling for the cvq command forever. This gives the scheduler a breath
> and can let the process can respond to a signal.
>
> Signed-off-by: Jason Wang <[email protected]>
> ---
> drivers/net/virtio_net.c | 15 ++++++++-------
> 1 file changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8225496ccb1e..69173049371f 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -405,6 +405,7 @@ static void disable_rx_mode_work(struct virtnet_info *vi)
> vi->rx_mode_work_enabled = false;
> spin_unlock_bh(&vi->rx_mode_lock);
>
> + virtqueue_wake_up(vi->cvq);
> flush_work(&vi->rx_mode_work);
> }
>
> @@ -1497,6 +1498,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> return !oom;
> }
>
> +static void virtnet_cvq_done(struct virtqueue *cvq)
> +{
> + virtqueue_wake_up(cvq);
> +}
> +
> static void skb_recv_done(struct virtqueue *rvq)
> {
> struct virtnet_info *vi = rvq->vdev->priv;
> @@ -2024,12 +2030,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> if (unlikely(!virtqueue_kick(vi->cvq)))
> return vi->ctrl->status == VIRTIO_NET_OK;
>
> - /* Spin for a response, the kick causes an ioport write, trapping
> - * into the hypervisor, so the request should be handled immediately.
> - */
> - while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> - !virtqueue_is_broken(vi->cvq))
> - cpu_relax();
> + virtqueue_wait_for_used(vi->cvq, &tmp);
>
> return vi->ctrl->status == VIRTIO_NET_OK;
> }
> @@ -3524,7 +3525,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
>
> /* Parameters for control virtqueue, if any */
> if (vi->has_cvq) {
> - callbacks[total_vqs - 1] = NULL;
> + callbacks[total_vqs - 1] = virtnet_cvq_done;
If we're using the CVQ callback, what is the actual use of the timeout?
I'd say there is no right choice, either in the timeout value or in the
action to take. Why not simply trigger the cmd and apply all the changes
when the command returns?
I suspect the reason is that it complicates the code. For example,
having the possibility of many in-flight commands, races between their
completions, etc. The virtio standard does not even cover out-of-order
used commands, if I'm not wrong.
Is there any other fundamental reason?
Thanks!
On Thu, Dec 22, 2022 at 5:19 PM Eugenio Perez Martin
<[email protected]> wrote:
>
> On Thu, Dec 22, 2022 at 7:05 AM Jason Wang <[email protected]> wrote:
> >
> > We used to busy waiting on the cvq command this tends to be
> > problematic since:
> >
> > 1) CPU could wait for ever on a buggy/malicous device
> > 2) There's no wait to terminate the process that triggers the cvq
> > command
> >
> > So this patch switch to use sleep with a timeout (1s) instead of busy
> > polling for the cvq command forever. This gives the scheduler a breath
> > and can let the process can respond to a signal.
> >
> > Signed-off-by: Jason Wang <[email protected]>
> > ---
> > drivers/net/virtio_net.c | 15 ++++++++-------
> > 1 file changed, 8 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 8225496ccb1e..69173049371f 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -405,6 +405,7 @@ static void disable_rx_mode_work(struct virtnet_info *vi)
> > vi->rx_mode_work_enabled = false;
> > spin_unlock_bh(&vi->rx_mode_lock);
> >
> > + virtqueue_wake_up(vi->cvq);
> > flush_work(&vi->rx_mode_work);
> > }
> >
> > @@ -1497,6 +1498,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> > return !oom;
> > }
> >
> > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > +{
> > + virtqueue_wake_up(cvq);
> > +}
> > +
> > static void skb_recv_done(struct virtqueue *rvq)
> > {
> > struct virtnet_info *vi = rvq->vdev->priv;
> > @@ -2024,12 +2030,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> > if (unlikely(!virtqueue_kick(vi->cvq)))
> > return vi->ctrl->status == VIRTIO_NET_OK;
> >
> > - /* Spin for a response, the kick causes an ioport write, trapping
> > - * into the hypervisor, so the request should be handled immediately.
> > - */
> > - while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > - !virtqueue_is_broken(vi->cvq))
> > - cpu_relax();
> > + virtqueue_wait_for_used(vi->cvq, &tmp);
> >
> > return vi->ctrl->status == VIRTIO_NET_OK;
> > }
> > @@ -3524,7 +3525,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> >
> > /* Parameters for control virtqueue, if any */
> > if (vi->has_cvq) {
> > - callbacks[total_vqs - 1] = NULL;
> > + callbacks[total_vqs - 1] = virtnet_cvq_done;
>
> If we're using CVQ callback, what is the actual use of the timeout?
Because we can't sleep forever, since locks such as the RTNL lock could be held.
>
> I'd say there is no right choice neither in the right timeout value
> nor in the action to take.
In the next version, I plan to use BAD_RING() to prevent future requests.
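For reference, a hedged sketch of one possible shape for that, kept in
virtio_ring.c where BAD_RING() is visible (this is not what was posted;
names follow the "introduce a per virtqueue waitqueue" patch):

int virtqueue_wait_for_used(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	int ret;

	ret = wait_event_interruptible_timeout(vq->wq,
					       virtqueue_get_buf(_vq, len),
					       HZ);
	if (ret == 0) {
		/*
		 * Timed out: consider the device broken and refuse
		 * further requests on this ring.
		 */
		BAD_RING(vq, "timeout waiting for a used buffer\n");
	}

	return ret;
}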
> Why not simply trigger the cmd and do all
> the changes at command return?
I don't get this, sorry.
>
> I suspect the reason is that it complicates the code. For example,
> having the possibility of many in flight commands, races between their
> completion, etc.
Actually, the cvq commands are serialized through the RTNL lock, so we
don't need to worry about this.
In the next version I can add ASSERT_RTNL().
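For reference, a sketch of where the assertion could live (hypothetical
placement, not a posted patch):

static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	/* cvq commands are serialized by the RTNL lock today; make that
	 * assumption explicit for future callers. */
	ASSERT_RTNL();
	...
}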
Thanks
> The virtio standard does not even cover unordered
> used commands if I'm not wrong.
>
> Is there any other fundamental reason?
>
> Thanks!
On Fri, Dec 23, 2022 at 4:04 AM Jason Wang <[email protected]> wrote:
>
> On Thu, Dec 22, 2022 at 5:19 PM Eugenio Perez Martin
> <[email protected]> wrote:
> >
> > On Thu, Dec 22, 2022 at 7:05 AM Jason Wang <[email protected]> wrote:
> > >
> > > We used to busy waiting on the cvq command this tends to be
> > > problematic since:
> > >
> > > 1) CPU could wait for ever on a buggy/malicous device
> > > 2) There's no wait to terminate the process that triggers the cvq
> > > command
> > >
> > > So this patch switch to use sleep with a timeout (1s) instead of busy
> > > polling for the cvq command forever. This gives the scheduler a breath
> > > and can let the process can respond to a signal.
> > >
> > > Signed-off-by: Jason Wang <[email protected]>
> > > ---
> > > drivers/net/virtio_net.c | 15 ++++++++-------
> > > 1 file changed, 8 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 8225496ccb1e..69173049371f 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -405,6 +405,7 @@ static void disable_rx_mode_work(struct virtnet_info *vi)
> > > vi->rx_mode_work_enabled = false;
> > > spin_unlock_bh(&vi->rx_mode_lock);
> > >
> > > + virtqueue_wake_up(vi->cvq);
> > > flush_work(&vi->rx_mode_work);
> > > }
> > >
> > > @@ -1497,6 +1498,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> > > return !oom;
> > > }
> > >
> > > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > > +{
> > > + virtqueue_wake_up(cvq);
> > > +}
> > > +
> > > static void skb_recv_done(struct virtqueue *rvq)
> > > {
> > > struct virtnet_info *vi = rvq->vdev->priv;
> > > @@ -2024,12 +2030,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> > > if (unlikely(!virtqueue_kick(vi->cvq)))
> > > return vi->ctrl->status == VIRTIO_NET_OK;
> > >
> > > - /* Spin for a response, the kick causes an ioport write, trapping
> > > - * into the hypervisor, so the request should be handled immediately.
> > > - */
> > > - while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > > - !virtqueue_is_broken(vi->cvq))
> > > - cpu_relax();
> > > + virtqueue_wait_for_used(vi->cvq, &tmp);
> > >
> > > return vi->ctrl->status == VIRTIO_NET_OK;
> > > }
> > > @@ -3524,7 +3525,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> > >
> > > /* Parameters for control virtqueue, if any */
> > > if (vi->has_cvq) {
> > > - callbacks[total_vqs - 1] = NULL;
> > > + callbacks[total_vqs - 1] = virtnet_cvq_done;
> >
> > If we're using CVQ callback, what is the actual use of the timeout?
>
> Because we can't sleep forever since locks could be held like RTNL_LOCK.
>
Right, rtnl_lock kind of invalidates it for the general case.
But do all of the commands need to take rtnl_lock? For example, I can see
how we could remove it from ctrl_announce, so a lack of ack may not be
fatal there. Assuming a buggy device, we could take some cvq commands
out of this fatal situation.
This series already improves the current situation, and my suggestion
(if it's worth it) can be applied on top of it, so it is not a blocker
at all.
> >
> > I'd say there is no right choice neither in the right timeout value
> > nor in the action to take.
>
> In the next version, I tend to put BAD_RING() to prevent future requests.
>
> > Why not simply trigger the cmd and do all
> > the changes at command return?
>
> I don't get this, sorry.
>
It's actually expanding on the first point, so you already answered it :).
Thanks!
> >
> > I suspect the reason is that it complicates the code. For example,
> > having the possibility of many in flight commands, races between their
> > completion, etc.
>
> Actually the cvq command was serialized through RTNL_LOCK, so we don't
> need to worry about this.
>
> In the next version I can add ASSERT_RTNL().
>
> Thanks
>
> > The virtio standard does not even cover unordered
> > used commands if I'm not wrong.
> >
> > Is there any other fundamental reason?
> >
> > Thanks!
> >
On Fri, Dec 23, 2022 at 4:05 PM Eugenio Perez Martin
<[email protected]> wrote:
>
> On Fri, Dec 23, 2022 at 4:04 AM Jason Wang <[email protected]> wrote:
> >
> > On Thu, Dec 22, 2022 at 5:19 PM Eugenio Perez Martin
> > <[email protected]> wrote:
> > >
> > > On Thu, Dec 22, 2022 at 7:05 AM Jason Wang <[email protected]> wrote:
> > > >
> > > > We used to busy waiting on the cvq command this tends to be
> > > > problematic since:
> > > >
> > > > 1) CPU could wait for ever on a buggy/malicous device
> > > > 2) There's no wait to terminate the process that triggers the cvq
> > > > command
> > > >
> > > > So this patch switch to use sleep with a timeout (1s) instead of busy
> > > > polling for the cvq command forever. This gives the scheduler a breath
> > > > and can let the process can respond to a signal.
> > > >
> > > > Signed-off-by: Jason Wang <[email protected]>
> > > > ---
> > > > drivers/net/virtio_net.c | 15 ++++++++-------
> > > > 1 file changed, 8 insertions(+), 7 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 8225496ccb1e..69173049371f 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -405,6 +405,7 @@ static void disable_rx_mode_work(struct virtnet_info *vi)
> > > > vi->rx_mode_work_enabled = false;
> > > > spin_unlock_bh(&vi->rx_mode_lock);
> > > >
> > > > + virtqueue_wake_up(vi->cvq);
> > > > flush_work(&vi->rx_mode_work);
> > > > }
> > > >
> > > > @@ -1497,6 +1498,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> > > > return !oom;
> > > > }
> > > >
> > > > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > > > +{
> > > > + virtqueue_wake_up(cvq);
> > > > +}
> > > > +
> > > > static void skb_recv_done(struct virtqueue *rvq)
> > > > {
> > > > struct virtnet_info *vi = rvq->vdev->priv;
> > > > @@ -2024,12 +2030,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> > > > if (unlikely(!virtqueue_kick(vi->cvq)))
> > > > return vi->ctrl->status == VIRTIO_NET_OK;
> > > >
> > > > - /* Spin for a response, the kick causes an ioport write, trapping
> > > > - * into the hypervisor, so the request should be handled immediately.
> > > > - */
> > > > - while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > > > - !virtqueue_is_broken(vi->cvq))
> > > > - cpu_relax();
> > > > + virtqueue_wait_for_used(vi->cvq, &tmp);
> > > >
> > > > return vi->ctrl->status == VIRTIO_NET_OK;
> > > > }
> > > > @@ -3524,7 +3525,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> > > >
> > > > /* Parameters for control virtqueue, if any */
> > > > if (vi->has_cvq) {
> > > > - callbacks[total_vqs - 1] = NULL;
> > > > + callbacks[total_vqs - 1] = virtnet_cvq_done;
> > >
> > > If we're using CVQ callback, what is the actual use of the timeout?
> >
> > Because we can't sleep forever since locks could be held like RTNL_LOCK.
> >
>
> Right, rtnl_lock kind of invalidates it for a general case.
>
> But do all of the commands need to take rtnl_lock? For example I see
> how we could remove it from ctrl_announce,
I think not, it's intended to serialize all cvq commands.
> so lack of ack may not be
> fatal for it.
Then there could be more than one cvq command sent to the device, and
the busy poll logic may not work.
And a lack of ack is a hint that the device malfunctioned, which is
something the driver should be aware of.
Thanks
> Assuming a buggy device, we can take some cvq commands
> out of this fatal situation.
>
> This series already improves the current situation and my suggestion
> (if it's worth it) can be applied on top of it, so it is not a blocker
> at all.
>
> > >
> > > I'd say there is no right choice neither in the right timeout value
> > > nor in the action to take.
> >
> > In the next version, I tend to put BAD_RING() to prevent future requests.
> >
> > > Why not simply trigger the cmd and do all
> > > the changes at command return?
> >
> > I don't get this, sorry.
> >
>
> It's actually expanding the first point so you already answered it :).
>
> Thanks!
>
> > >
> > > I suspect the reason is that it complicates the code. For example,
> > > having the possibility of many in flight commands, races between their
> > > completion, etc.
> >
> > Actually the cvq command was serialized through RTNL_LOCK, so we don't
> > need to worry about this.
> >
> > In the next version I can add ASSERT_RTNL().
> >
> > Thanks
> >
> > > The virtio standard does not even cover unordered
> > > used commands if I'm not wrong.
> > >
> > > Is there any other fundamental reason?
> > >
> > > Thanks!
> > >