From: Stephen Rothwell
Date: 2013-04-29 07:29:30
Subject: linux-next: manual merge of the vhost tree with the net-next tree

Hi Michael,

Today's linux-next merge of the vhost tree got a conflict in
drivers/vhost/net.c between commit 70181d51209c ("vhost_net: remove tx
polling state") from the net-next tree and commits 11569c7119a6 ("vhost:
Allow device specific fields per vq"), 15a51679a0a1 ("vhost: Move
vhost-net zerocopy support fields to net.c") and 0cc4c2bd142b ("vhost:
move per-vq net specific fields out to net") from the vhost tree.

I fixed it up (I think - see below - there is probably a better way) and
can carry the fix as necessary (no action is required).
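
For reference, the crux of the fixup below (in vhost_net_disable_vq() and
vhost_net_enable_vq()) is recovering the poll slot from a plain
vhost_virtqueue pointer now that vqs[] holds wrapper structures, so the
old "n->poll + (vq - n->vqs)" arithmetic no longer type-checks.  A minimal
userspace sketch of that pattern (stand-in types and a local container_of(),
not the kernel sources):

#include <stddef.h>
#include <stdio.h>

struct vhost_virtqueue { int unused; };

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;	/* embedded, as in the vhost tree */
	int upend_idx, done_idx;	/* per-vq zerocopy state lives here now */
};

struct vhost_poll { int unused; };

#define VHOST_NET_VQ_MAX 2

struct vhost_net {
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; /* was vhost_virtqueue[] */
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
};

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct vhost_poll *poll_for_vq(struct vhost_net *n,
				      struct vhost_virtqueue *vq)
{
	/* (vq - n->vqs) is now a type error: vq points at a member, not at
	 * an array element.  Step out to the containing structure first. */
	struct vhost_net_virtqueue *vnq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	return n->poll + (vnq - n->vqs);
}

int main(void)
{
	struct vhost_net n;

	/* vqs[1].vq should map back to poll[1]; prints 1. */
	printf("%d\n", (int)(poll_for_vq(&n, &n.vqs[1].vq) - n.poll));
	return 0;
}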

diff --cc drivers/vhost/net.c
index 87c216c,661d9be..0000000
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@@ -64,10 -64,46 +64,36 @@@ enum
  	VHOST_NET_VQ_MAX = 2,
  };
  
 -enum vhost_net_poll_state {
 -	VHOST_NET_POLL_DISABLED = 0,
 -	VHOST_NET_POLL_STARTED = 1,
 -	VHOST_NET_POLL_STOPPED = 2,
 -};
 -
+ struct vhost_ubuf_ref {
+ 	struct kref kref;
+ 	wait_queue_head_t wait;
+ 	struct vhost_virtqueue *vq;
+ };
+ 
+ struct vhost_net_virtqueue {
+ 	struct vhost_virtqueue vq;
+ 	/* hdr is used to store the virtio header.
+ 	 * Since each iovec has >= 1 byte length, we never need more than
+ 	 * header length entries to store the header. */
+ 	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+ 	size_t vhost_hlen;
+ 	size_t sock_hlen;
+ 	/* vhost zerocopy support fields below: */
+ 	/* last used idx for outstanding DMA zerocopy buffers */
+ 	int upend_idx;
+ 	/* first used idx for DMA done zerocopy buffers */
+ 	int done_idx;
+ 	/* an array of userspace buffers info */
+ 	struct ubuf_info *ubuf_info;
+ 	/* Reference counting for outstanding ubufs.
+ 	 * Protected by vq mutex. Writers must also take device mutex. */
+ 	struct vhost_ubuf_ref *ubufs;
+ };
+ 
  struct vhost_net {
  	struct vhost_dev dev;
- 	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+ 	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
  	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 -	/* Tells us whether we are polling a socket for TX.
 -	 * We only do this when socket buffer fills up.
 -	 * Protected by tx vq lock. */
 -	enum vhost_net_poll_state tx_poll_state;
  	/* Number of TX recently submitted.
  	 * Protected by tx vq lock. */
  	unsigned tx_packets;
@@@ -224,8 -369,18 +337,8 @@@ static void handle_tx(struct vhost_net
  	mutex_lock(&vq->mutex);
  	vhost_disable_notify(&net->dev, vq);
  
- 	hdr_size = vq->vhost_hlen;
- 	zcopy = vq->ubufs;
 -	if (wmem < sock->sk->sk_sndbuf / 2)
 -		tx_poll_stop(net);
+ 	hdr_size = nvq->vhost_hlen;
+ 	zcopy = nvq->ubufs;
  
  	for (;;) {
  		/* Release DMAs done buffers first */
@@@ -246,11 -401,21 +359,12 @@@
  			/* If more outstanding DMAs, queue the work.
  			 * Handle upend_idx wrap around
  			 */
- 			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
- 				    (vq->upend_idx - vq->done_idx) :
- 				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
+ 			num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
+ 				    (nvq->upend_idx - nvq->done_idx) :
+ 				    (nvq->upend_idx + UIO_MAXIOV -
+ 				     nvq->done_idx);
 -			if (unlikely(num_pends > VHOST_MAX_PEND)) {
 -				tx_poll_start(net, sock);
 -				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 +			if (unlikely(num_pends > VHOST_MAX_PEND))
  				break;
 -			}
  			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
  				vhost_disable_notify(&net->dev, vq);
  				continue;
@@@ -309,10 -474,12 +423,10 @@@
  			if (zcopy_used) {
  				if (ubufs)
  					vhost_ubuf_put(ubufs);
- 				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
- 					UIO_MAXIOV;
+ 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ 					% UIO_MAXIOV;
  			}
  			vhost_discard_vq_desc(vq, 1);
 -			if (err == -EAGAIN || err == -ENOBUFS)
 -				tx_poll_start(net, sock);
  			break;
  		}
  		if (err != len)
@@@ -584,24 -770,32 +716,31 @@@ static int vhost_net_open(struct inode
  static void vhost_net_disable_vq(struct vhost_net *n,
  				 struct vhost_virtqueue *vq)
  {
- 	struct vhost_poll *poll = n->poll + (vq - n->vqs);
++	struct vhost_net_virtqueue *vnq;
++	struct vhost_poll *poll;
++
  	if (!vq->private_data)
  		return;
 -	if (vq == &n->vqs[VHOST_NET_VQ_TX].vq) {
 -		tx_poll_stop(n);
 -		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
 -	} else
 -		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
++	vnq = container_of(vq, struct vhost_net_virtqueue, vq);
++	poll = n->poll + (vnq - n->vqs);
 +	vhost_poll_stop(poll);
  }
  
  static int vhost_net_enable_vq(struct vhost_net *n,
  			       struct vhost_virtqueue *vq)
  {
- 	struct vhost_poll *poll = n->poll + (vq - n->vqs);
++	struct vhost_net_virtqueue *vnq;
++	struct vhost_poll *poll;
  	struct socket *sock;
 -	int ret;
  
  	sock = rcu_dereference_protected(vq->private_data,
  					 lockdep_is_held(&vq->mutex));
  	if (!sock)
  		return 0;
 -	if (vq == &n->vqs[VHOST_NET_VQ_TX].vq) {
 -		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
 -		ret = tx_poll_start(n, sock);
 -	} else
 -		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
++	vnq = container_of(vq, struct vhost_net_virtqueue, vq);
++	poll = n->poll + (vnq - n->vqs);
  
 -	return ret;
 +	return vhost_poll_start(poll, sock->file);
  }
  
  static struct socket *vhost_net_stop_vq(struct vhost_net *n,
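
Most of the rest of the conflict is mechanical vq -> nvq renaming; the one
piece of logic worth sanity-checking is the pending-DMA count in
handle_tx().  A standalone check of that wraparound expression (RING_SIZE
standing in for UIO_MAXIOV; again a sketch, not the kernel code):

#include <assert.h>

#define RING_SIZE 1024	/* stands in for UIO_MAXIOV */

/* Outstanding zerocopy buffers between done_idx and upend_idx, allowing
 * upend_idx to have wrapped past the end of the ring. */
static int num_pends(int upend_idx, int done_idx)
{
	return upend_idx >= done_idx ?
	       upend_idx - done_idx :
	       upend_idx + RING_SIZE - done_idx;
}

int main(void)
{
	assert(num_pends(10, 4) == 6);			/* no wrap */
	assert(num_pends(3, RING_SIZE - 2) == 5);	/* upend_idx wrapped */
	return 0;
}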

--
Cheers,
Stephen Rothwell [email protected]

