2018-01-04 03:14:38

by Jason Wang

[permalink] [raw]
Subject: [PATCH net-next V2 0/2] XDP transmission for tuntap

Hi all:

This series tries to implement XDP transmission (ndo_xdp_xmit) for
tuntap. Pointer ring was used for queuing both XDP buffers and
sk_buff, this is done by encoding the type into lowest bit of the
pointer and storing XDP metadata in the headroom of XDP buff.

Tests gets 3.05 Mpps when doing xdp_redirect_map from ixgbe to VM
(testpmd + virtio-net in guest). This gives us ~20% improvements
compared to using skb during redirect.

Please review.

Changes from V1:

- silence warnings
- fix typos
- add skb mode number in the commit log

Jason Wang (2):
tun/tap: use ptr_ring instead of skb_array
tuntap: XDP transmission

drivers/net/tap.c | 41 ++++-----
drivers/net/tun.c | 239 +++++++++++++++++++++++++++++++++++++++----------
drivers/vhost/net.c | 52 ++++++-----
include/linux/if_tap.h | 6 +-
include/linux/if_tun.h | 21 ++++-
5 files changed, 269 insertions(+), 90 deletions(-)

--
2.7.4


2018-01-04 03:14:45

by Jason Wang

[permalink] [raw]
Subject: [PATCH net-next V2 1/2] tun/tap: use ptr_ring instead of skb_array

This patch switches to use ptr_ring instead of skb_array. This will be
used to enqueue different types of pointers by encoding type into
lower bits.

Signed-off-by: Jason Wang <[email protected]>
---
drivers/net/tap.c | 41 +++++++++++++++++++++--------------------
drivers/net/tun.c | 42 ++++++++++++++++++++++--------------------
drivers/vhost/net.c | 39 ++++++++++++++++++++-------------------
include/linux/if_tap.h | 6 +++---
include/linux/if_tun.h | 4 ++--
5 files changed, 68 insertions(+), 64 deletions(-)

diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 0a886fda..7c38659 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,7 +330,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
if (!q)
return RX_HANDLER_PASS;

- if (__skb_array_full(&q->skb_array))
+ if (__ptr_ring_full(&q->ring))
goto drop;

skb_push(skb, ETH_HLEN);
@@ -348,7 +348,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
goto drop;

if (!segs) {
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
goto wake_up;
}
@@ -358,7 +358,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = segs->next;

segs->next = NULL;
- if (skb_array_produce(&q->skb_array, segs)) {
+ if (ptr_ring_produce(&q->ring, segs)) {
kfree_skb(segs);
kfree_skb_list(nskb);
break;
@@ -375,7 +375,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
!(features & NETIF_F_CSUM_MASK) &&
skb_checksum_help(skb))
goto drop;
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
}

@@ -497,7 +497,7 @@ static void tap_sock_destruct(struct sock *sk)
{
struct tap_queue *q = container_of(sk, struct tap_queue, sk);

- skb_array_cleanup(&q->skb_array);
+ ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +517,7 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
@@ -546,7 +546,7 @@ static int tap_open(struct inode *inode, struct file *file)

err = tap_set_queue(tap, file, q);
if (err) {
- /* tap_sock_destruct() will take care of freeing skb_array */
+ /* tap_sock_destruct() will take care of freeing ptr_ring */
goto err_put;
}

@@ -583,7 +583,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait)
mask = 0;
poll_wait(file, &q->wq.wait, wait);

- if (!skb_array_empty(&q->skb_array))
+ if (!ptr_ring_empty(&q->ring))
mask |= POLLIN | POLLRDNORM;

if (sock_writeable(&q->sk) ||
@@ -844,7 +844,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
TASK_INTERRUPTIBLE);

/* Read frames from the queue */
- skb = skb_array_consume(&q->skb_array);
+ skb = ptr_ring_consume(&q->ring);
if (skb)
break;
if (noblock) {
@@ -1176,7 +1176,7 @@ static int tap_peek_len(struct socket *sock)
{
struct tap_queue *q = container_of(sock, struct tap_queue,
sock);
- return skb_array_peek_len(&q->skb_array);
+ return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1202,7 @@ struct socket *tap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tap_get_socket);

-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
struct tap_queue *q;

@@ -1211,29 +1211,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
q = file->private_data;
if (!q)
return ERR_PTR(-EBADFD);
- return &q->skb_array;
+ return &q->ring;
}
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
struct tap_queue *q;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tap->numqueues;
int ret, i = 0;

- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;

list_for_each_entry(q, &tap->queue_list, next)
- arrays[i++] = &q->skb_array;
+ rings[i++] = &q->ring;

- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);

- kfree(arrays);
+ kfree(rings);
return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e367d631..2c89efe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,7 @@ struct tun_file {
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
- struct skb_array tx_array;
+ struct ptr_ring tx_ring;
};

struct tun_flow_entry {
@@ -634,7 +634,7 @@ static void tun_queue_purge(struct tun_file *tfile)
{
struct sk_buff *skb;

- while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
+ while ((skb = ptr_ring_consume(&tfile->tx_ring)) != NULL)
kfree_skb(skb);

skb_queue_purge(&tfile->sk.sk_write_queue);
@@ -688,7 +688,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
unregister_netdevice(tun->dev);
}
if (tun)
- skb_array_cleanup(&tfile->tx_array);
+ ptr_ring_cleanup(&tfile->tx_ring,
+ __skb_array_destroy_skb);
sock_put(&tfile->sk);
}
}
@@ -777,7 +778,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}

if (!tfile->detached &&
- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
err = -ENOMEM;
goto out;
}
@@ -1027,7 +1028,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)

nf_reset(skb);

- if (skb_array_produce(&tfile->tx_array, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;

/* Notify and wake up reader process */
@@ -1295,7 +1296,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)

poll_wait(file, sk_sleep(sk), wait);

- if (!skb_array_empty(&tfile->tx_array))
+ if (!ptr_ring_empty(&tfile->tx_ring))
mask |= POLLIN | POLLRDNORM;

if (tun->dev->flags & IFF_UP &&
@@ -1944,7 +1945,7 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
struct sk_buff *skb = NULL;
int error = 0;

- skb = skb_array_consume(&tfile->tx_array);
+ skb = ptr_ring_consume(&tfile->tx_ring);
if (skb)
goto out;
if (noblock) {
@@ -1956,7 +1957,7 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
current->state = TASK_INTERRUPTIBLE;

while (1) {
- skb = skb_array_consume(&tfile->tx_array);
+ skb = ptr_ring_consume(&tfile->tx_ring);
if (skb)
break;
if (signal_pending(current)) {
@@ -2186,7 +2187,7 @@ static int tun_peek_len(struct socket *sock)
if (!tun)
return 0;

- ret = skb_array_peek_len(&tfile->tx_array);
+ ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, __skb_array_len_with_tag);
tun_put(tun);

return ret;
@@ -3092,25 +3093,26 @@ static int tun_queue_resize(struct tun_struct *tun)
{
struct net_device *dev = tun->dev;
struct tun_file *tfile;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tun->numqueues + tun->numdisabled;
int ret, i;

- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;

for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- arrays[i] = &tfile->tx_array;
+ rings[i] = &tfile->tx_ring;
}
list_for_each_entry(tfile, &tun->disabled, next)
- arrays[i++] = &tfile->tx_array;
+ rings[i++] = &tfile->tx_ring;

- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);

- kfree(arrays);
+ kfree(rings);
return ret;
}

@@ -3196,7 +3198,7 @@ struct socket *tun_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tun_get_socket);

-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
{
struct tun_file *tfile;

@@ -3205,9 +3207,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
- return &tfile->tx_array;
+ return &tfile->tx_ring;
}
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb6..c316555 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {

#define VHOST_RX_BATCH 64
struct vhost_net_buf {
- struct sk_buff **queue;
+ void **queue;
int tail;
int head;
};
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
- struct skb_array *rx_array;
+ struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
};

@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
struct vhost_net_buf *rxq = &nvq->rxq;

rxq->head = 0;
- rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
VHOST_RX_BATCH);
return rxq->tail;
}
@@ -167,9 +167,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;

- if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
- skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
- vhost_net_buf_get_size(rxq));
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ __skb_array_destroy_skb);
rxq->head = rxq->tail = 0;
}
}
@@ -583,7 +584,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
int len = 0;
unsigned long flags;

- if (rvq->rx_array)
+ if (rvq->rx_ring)
return vhost_net_buf_peek(rvq);

spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -790,7 +791,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
- if (nvq->rx_array)
+ if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
@@ -896,7 +897,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- struct sk_buff **queue;
+ void **queue;
int i;

n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +909,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}

- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1046,23 +1047,23 @@ static struct socket *get_raw_socket(int fd)
return ERR_PTR(r);
}

-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
{
- struct skb_array *array;
+ struct ptr_ring *ring;
struct file *file = fget(fd);

if (!file)
return NULL;
- array = tun_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = tap_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = NULL;
+ ring = NULL;
out:
fput(file);
- return array;
+ return ring;
}

static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1144,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vq->private_data = sock;
vhost_net_buf_unproduce(nvq);
if (index == VHOST_NET_VQ_RX)
- nvq->rx_array = get_tap_skb_array(fd);
+ nvq->rx_ring = get_tap_ptr_ring(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57..8e66866 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@

#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
@@ -70,7 +70,7 @@ struct tap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
- struct skb_array skb_array;
+ struct ptr_ring ring;
};

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf4..bdee9b8 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,7 +19,7 @@

#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -29,7 +29,7 @@ static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
--
2.7.4

2018-01-04 03:14:51

by Jason Wang

[permalink] [raw]
Subject: [PATCH net-next V2 2/2] tuntap: XDP transmission

This patch implements XDP transmission for TAP. Since we can't create
new queues for TAP during XDP set, the existing ptr_ring was reused for
queuing XDP buffers. To distinguish xdp_buff from sk_buff, TUN_XDP_FLAG
(0x1UL) was encoded into the lowest bit of the xdp_buff pointer during
ptr_ring_produce, and was decoded during consuming. XDP metadata was
stored in the headroom of the packet which should work in most of
cases since drivers usually reserve enough headroom. Very minor changes
were done for vhost_net: it just needs to peek the length depending on
the type of pointer.

Tests were done on two Intel E5-2630 2.40GHz machines connected back
to back through two 82599ES. Traffic was generated/received through
MoonGen/testpmd(rxonly). It reports ~20% improvements when
xdp_redirect_map is doing redirection from ixgbe to TAP (from 2.50Mpps
to 3.05Mpps)

Cc: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
---
drivers/net/tun.c | 211 +++++++++++++++++++++++++++++++++++++++++--------
drivers/vhost/net.c | 13 ++-
include/linux/if_tun.h | 17 ++++
3 files changed, 208 insertions(+), 33 deletions(-)

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2c89efe..f2e805d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -240,6 +240,24 @@ struct tun_struct {
struct tun_steering_prog __rcu *steering_prog;
};

+bool tun_is_xdp_buff(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -630,12 +648,25 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}

+static void tun_ptr_free(void *ptr)
+{
+ if (!ptr)
+ return;
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ __skb_array_destroy_skb(ptr);
+ }
+}
+
static void tun_queue_purge(struct tun_file *tfile)
{
- struct sk_buff *skb;
+ void *ptr;

- while ((skb = ptr_ring_consume(&tfile->tx_ring)) != NULL)
- kfree_skb(skb);
+ while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+ tun_ptr_free(ptr);

skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
@@ -688,8 +719,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
unregister_netdevice(tun->dev);
}
if (tun)
- ptr_ring_cleanup(&tfile->tx_ring,
- __skb_array_destroy_skb);
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
sock_put(&tfile->sk);
}
}
@@ -1201,6 +1231,67 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
};

+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct xdp_buff *buff = xdp->data_hard_start;
+ int headroom = xdp->data - xdp->data_hard_start;
+ struct tun_file *tfile;
+ u32 numqueues;
+ int ret = 0;
+
+ /* Assure headroom is available and buff is properly aligned */
+ if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+ return -ENOSPC;
+
+ *buff = *xdp;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Encode the XDP flag into lowest bit for consumer to differ
+ * XDP buffer from sk_buff.
+ */
+ if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ ret = -ENOSPC;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
+ u32 numqueues;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues)
+ goto out;
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Notify and wake up reader process */
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops tap_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -1218,6 +1309,8 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
+ .ndo_xdp_xmit = tun_xdp_xmit,
+ .ndo_xdp_flush = tun_xdp_flush,
};

static void tun_flow_init(struct tun_struct *tun)
@@ -1841,6 +1934,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}

+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp,
+ struct iov_iter *iter)
+{
+ int vnet_hdr_sz = 0;
+ size_t size = xdp->data_end - xdp->data;
+ struct tun_pcpu_stats *stats;
+ size_t ret;
+
+ if (tun->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr gso = { 0 };
+
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+ return -EINVAL;
+ if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+ sizeof(gso)))
+ return -EFAULT;
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ }
+
+ ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += ret;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
+
+ return ret;
+}
+
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
@@ -1938,15 +2065,14 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return total;
}

-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
- int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
DECLARE_WAITQUEUE(wait, current);
- struct sk_buff *skb = NULL;
+ void *ptr = NULL;
int error = 0;

- skb = ptr_ring_consume(&tfile->tx_ring);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
goto out;
if (noblock) {
error = -EAGAIN;
@@ -1957,8 +2083,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
current->state = TASK_INTERRUPTIBLE;

while (1) {
- skb = ptr_ring_consume(&tfile->tx_ring);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
break;
if (signal_pending(current)) {
error = -ERESTARTSYS;
@@ -1977,12 +2103,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,

out:
*err = error;
- return skb;
+ return ptr;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock, struct sk_buff *skb)
+ int noblock, void *ptr)
{
ssize_t ret;
int err;
@@ -1990,23 +2116,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");

if (!iov_iter_count(to)) {
- if (skb)
- kfree_skb(skb);
+ tun_ptr_free(ptr);
return 0;
}

- if (!skb) {
+ if (!ptr) {
/* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
+ ptr = tun_ring_recv(tfile, noblock, &err);
+ if (!ptr)
return err;
}

- ret = tun_put_user(tun, tfile, skb, to);
- if (unlikely(ret < 0))
- kfree_skb(skb);
- else
- consume_skb(skb);
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ ret = tun_put_user_xdp(tun, tfile, xdp, to);
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ struct sk_buff *skb = ptr;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ }

return ret;
}
@@ -2143,12 +2277,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
- struct sk_buff *skb = m->msg_control;
+ void *ptr = m->msg_control;
int ret;

if (!tun) {
ret = -EBADFD;
- goto out_free_skb;
+ goto out_free;
}

if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2160,7 +2294,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2171,12 +2305,25 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,

out_put_tun:
tun_put(tun);
-out_free_skb:
- if (skb)
- kfree_skb(skb);
+out_free:
+ tun_ptr_free(ptr);
return ret;
}

+static int tun_ptr_peek_len(void *ptr)
+{
+ if (likely(ptr)) {
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+ return __skb_array_len_with_tag(ptr);
+ } else {
+ return 0;
+ }
+}
+
static int tun_peek_len(struct socket *sock)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2187,7 +2334,7 @@ static int tun_peek_len(struct socket *sock)
if (!tun)
return 0;

- ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, __skb_array_len_with_tag);
+ ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
tun_put(tun);

return ret;
@@ -3110,7 +3257,7 @@ static int tun_queue_resize(struct tun_struct *tun)

ret = ptr_ring_resize_multiple(rings, n,
dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ tun_ptr_free);

kfree(rings);
return ret;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c316555..a5a1db6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -175,6 +175,17 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
}
}

+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
@@ -186,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
return 0;

out:
- return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bdee9b8..08e6682 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,14 @@

#include <uapi/linux/if_tun.h>

+#define TUN_XDP_FLAG 0x1UL
+
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
+bool tun_is_xdp_buff(void *ptr);
+void *tun_xdp_to_ptr(void *ptr);
+void *tun_ptr_to_xdp(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -33,5 +38,17 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline bool tun_is_xdp_buff(void *ptr)
+{
+ return false;
+}
+void *tun_xdp_to_ptr(void *ptr)
+{
+ return NULL;
+}
+void *tun_ptr_to_xdp(void *ptr)
+{
+ return NULL;
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
--
2.7.4

2018-01-09 16:06:19

by David Miller

[permalink] [raw]
Subject: Re: [PATCH net-next V2 0/2] XDP transmission for tuntap

From: Jason Wang <[email protected]>
Date: Thu, 4 Jan 2018 11:14:26 +0800

> This series tries to implement XDP transmission (ndo_xdp_xmit) for
> tuntap. Pointer ring was used for queuing both XDP buffers and
> sk_buff, this is done by encoding the type into lowest bit of the
> pointer and storin XDP metadata in the headroom of XDP buff.
>
> Tests gets 3.05 Mpps when doing xdp_redirect_map from ixgbe to VM
> (testpmd + virtio-net in guest). This gives us ~20% improvments
> compared to use skb during redirect.
>
> Please review.
>
> Changes from V1:
>
> - slient warnings
> - fix typos
> - add skb mode number in the commit log

Series applied, thanks Jason.

There was some overlap with recent changes from Jesper, please take
a look.

Thank you.

2018-01-10 03:00:55

by Jason Wang

[permalink] [raw]
Subject: Re: [PATCH net-next V2 0/2] XDP transmission for tuntap



On 2018年01月10日 00:06, David Miller wrote:
> From: Jason Wang <[email protected]>
> Date: Thu, 4 Jan 2018 11:14:26 +0800
>
>> This series tries to implement XDP transmission (ndo_xdp_xmit) for
>> tuntap. Pointer ring was used for queuing both XDP buffers and
>> sk_buff, this is done by encoding the type into lowest bit of the
>> pointer and storin XDP metadata in the headroom of XDP buff.
>>
>> Tests gets 3.05 Mpps when doing xdp_redirect_map from ixgbe to VM
>> (testpmd + virtio-net in guest). This gives us ~20% improvments
>> compared to use skb during redirect.
>>
>> Please review.
>>
>> Changes from V1:
>>
>> - slient warnings
>> - fix typos
>> - add skb mode number in the commit log
> Series applied, thanks Jason.
>
> There was some overlap with recent changes from Jesper, please take
> a look.
>
> Thank you.

The changes look good, will have a test and report.

Thanks

2018-01-10 16:12:39

by Tariq Toukan

[permalink] [raw]
Subject: Re: [PATCH net-next V2 2/2] tuntap: XDP transmission



On 04/01/2018 5:14 AM, Jason Wang wrote:
> This patch implements XDP transmission for TAP. Since we can't create
> new queues for TAP during XDP set, exist ptr_ring was reused for
> queuing XDP buffers. To differ xdp_buff from sk_buff, TUN_XDP_FLAG
> (0x1UL) was encoded into lowest bit of xpd_buff pointer during
> ptr_ring_produce, and was decoded during consuming. XDP metadata was
> stored in the headroom of the packet which should work in most of
> cases since driver usually reserve enough headroom. Very minor changes
> were done for vhost_net: it just need to peek the length depends on
> the type of pointer.
>
> Tests were done on two Intel E5-2630 2.40GHz machines connected back
> to back through two 82599ES. Traffic were generated/received through
> MoonGen/testpmd(rxonly). It reports ~20% improvements when
> xdp_redirect_map is doing redirection from ixgbe to TAP (from 2.50Mpps
> to 3.05Mpps)
>
> Cc: Jesper Dangaard Brouer <[email protected]>
> Signed-off-by: Jason Wang <[email protected]>
> ---
> drivers/net/tun.c | 211 +++++++++++++++++++++++++++++++++++++++++--------
> drivers/vhost/net.c | 13 ++-
> include/linux/if_tun.h | 17 ++++
> 3 files changed, 208 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index 2c89efe..f2e805d 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -240,6 +240,24 @@ struct tun_struct {
> struct tun_steering_prog __rcu *steering_prog;
> };
>
> +bool tun_is_xdp_buff(void *ptr)
> +{
> + return (unsigned long)ptr & TUN_XDP_FLAG;
> +}
> +EXPORT_SYMBOL(tun_is_xdp_buff);
> +
> +void *tun_xdp_to_ptr(void *ptr)
> +{
> + return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
> +}
> +EXPORT_SYMBOL(tun_xdp_to_ptr);
> +
> +void *tun_ptr_to_xdp(void *ptr)
> +{
> + return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
> +}
> +EXPORT_SYMBOL(tun_ptr_to_xdp);
> +

Hi Jason,
I started getting the following compilation issues.

+ make -j24 -s
net/socket.o: In function `tun_xdp_to_ptr':
/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:46:
multiple definition of `tun_xdp_to_ptr'
fs/compat_ioctl.o:/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:46:
first defined here
net/socket.o: In function `tun_ptr_to_xdp':
/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:50:
multiple definition of `tun_ptr_to_xdp'
fs/compat_ioctl.o:/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:50:
first defined here
make: *** [vmlinux] Error 1

Seems you missed adding the following ifdef:
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)

Thanks,
Tariq

> static int tun_napi_receive(struct napi_struct *napi, int budget)
> {
> struct tun_file *tfile = container_of(napi, struct tun_file, napi);
> @@ -630,12 +648,25 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
> return tun;
> }
>
> +static void tun_ptr_free(void *ptr)
> +{
> + if (!ptr)
> + return;
> + if (tun_is_xdp_buff(ptr)) {
> + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
> +
> + put_page(virt_to_head_page(xdp->data));
> + } else {
> + __skb_array_destroy_skb(ptr);
> + }
> +}
> +
> static void tun_queue_purge(struct tun_file *tfile)
> {
> - struct sk_buff *skb;
> + void *ptr;
>
> - while ((skb = ptr_ring_consume(&tfile->tx_ring)) != NULL)
> - kfree_skb(skb);
> + while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
> + tun_ptr_free(ptr);
>
> skb_queue_purge(&tfile->sk.sk_write_queue);
> skb_queue_purge(&tfile->sk.sk_error_queue);
> @@ -688,8 +719,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
> unregister_netdevice(tun->dev);
> }
> if (tun)
> - ptr_ring_cleanup(&tfile->tx_ring,
> - __skb_array_destroy_skb);
> + ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
> sock_put(&tfile->sk);
> }
> }
> @@ -1201,6 +1231,67 @@ static const struct net_device_ops tun_netdev_ops = {
> .ndo_get_stats64 = tun_net_get_stats64,
> };
>
> +static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
> +{
> + struct tun_struct *tun = netdev_priv(dev);
> + struct xdp_buff *buff = xdp->data_hard_start;
> + int headroom = xdp->data - xdp->data_hard_start;
> + struct tun_file *tfile;
> + u32 numqueues;
> + int ret = 0;
> +
> + /* Assure headroom is available and buff is properly aligned */
> + if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
> + return -ENOSPC;
> +
> + *buff = *xdp;
> +
> + rcu_read_lock();
> +
> + numqueues = READ_ONCE(tun->numqueues);
> + if (!numqueues) {
> + ret = -ENOSPC;
> + goto out;
> + }
> +
> + tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
> + numqueues]);
> + /* Encode the XDP flag into lowest bit for consumer to differ
> + * XDP buffer from sk_buff.
> + */
> + if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
> + this_cpu_inc(tun->pcpu_stats->tx_dropped);
> + ret = -ENOSPC;
> + }
> +
> +out:
> + rcu_read_unlock();
> + return ret;
> +}
> +
> +static void tun_xdp_flush(struct net_device *dev)
> +{
> + struct tun_struct *tun = netdev_priv(dev);
> + struct tun_file *tfile;
> + u32 numqueues;
> +
> + rcu_read_lock();
> +
> + numqueues = READ_ONCE(tun->numqueues);
> + if (!numqueues)
> + goto out;
> +
> + tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
> + numqueues]);
> + /* Notify and wake up reader process */
> + if (tfile->flags & TUN_FASYNC)
> + kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
> + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
> +
> +out:
> + rcu_read_unlock();
> +}
> +
> static const struct net_device_ops tap_netdev_ops = {
> .ndo_uninit = tun_net_uninit,
> .ndo_open = tun_net_open,
> @@ -1218,6 +1309,8 @@ static const struct net_device_ops tap_netdev_ops = {
> .ndo_set_rx_headroom = tun_set_headroom,
> .ndo_get_stats64 = tun_net_get_stats64,
> .ndo_bpf = tun_xdp,
> + .ndo_xdp_xmit = tun_xdp_xmit,
> + .ndo_xdp_flush = tun_xdp_flush,
> };
>
> static void tun_flow_init(struct tun_struct *tun)
> @@ -1841,6 +1934,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
> return result;
> }
>
> +static ssize_t tun_put_user_xdp(struct tun_struct *tun,
> + struct tun_file *tfile,
> + struct xdp_buff *xdp,
> + struct iov_iter *iter)
> +{
> + int vnet_hdr_sz = 0;
> + size_t size = xdp->data_end - xdp->data;
> + struct tun_pcpu_stats *stats;
> + size_t ret;
> +
> + if (tun->flags & IFF_VNET_HDR) {
> + struct virtio_net_hdr gso = { 0 };
> +
> + vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
> + if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
> + return -EINVAL;
> + if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
> + sizeof(gso)))
> + return -EFAULT;
> + iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
> + }
> +
> + ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
> +
> + stats = get_cpu_ptr(tun->pcpu_stats);
> + u64_stats_update_begin(&stats->syncp);
> + stats->tx_packets++;
> + stats->tx_bytes += ret;
> + u64_stats_update_end(&stats->syncp);
> + put_cpu_ptr(tun->pcpu_stats);
> +
> + return ret;
> +}
> +
> /* Put packet to the user space buffer */
> static ssize_t tun_put_user(struct tun_struct *tun,
> struct tun_file *tfile,
> @@ -1938,15 +2065,14 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> return total;
> }
>
> -static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
> - int *err)
> +static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
> {
> DECLARE_WAITQUEUE(wait, current);
> - struct sk_buff *skb = NULL;
> + void *ptr = NULL;
> int error = 0;
>
> - skb = ptr_ring_consume(&tfile->tx_ring);
> - if (skb)
> + ptr = ptr_ring_consume(&tfile->tx_ring);
> + if (ptr)
> goto out;
> if (noblock) {
> error = -EAGAIN;
> @@ -1957,8 +2083,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
> current->state = TASK_INTERRUPTIBLE;
>
> while (1) {
> - skb = ptr_ring_consume(&tfile->tx_ring);
> - if (skb)
> + ptr = ptr_ring_consume(&tfile->tx_ring);
> + if (ptr)
> break;
> if (signal_pending(current)) {
> error = -ERESTARTSYS;
> @@ -1977,12 +2103,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
>
> out:
> *err = error;
> - return skb;
> + return ptr;
> }
>
> static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
> struct iov_iter *to,
> - int noblock, struct sk_buff *skb)
> + int noblock, void *ptr)
> {
> ssize_t ret;
> int err;
> @@ -1990,23 +2116,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
> tun_debug(KERN_INFO, tun, "tun_do_read\n");
>
> if (!iov_iter_count(to)) {
> - if (skb)
> - kfree_skb(skb);
> + tun_ptr_free(ptr);
> return 0;
> }
>
> - if (!skb) {
> + if (!ptr) {
> /* Read frames from ring */
> - skb = tun_ring_recv(tfile, noblock, &err);
> - if (!skb)
> + ptr = tun_ring_recv(tfile, noblock, &err);
> + if (!ptr)
> return err;
> }
>
> - ret = tun_put_user(tun, tfile, skb, to);
> - if (unlikely(ret < 0))
> - kfree_skb(skb);
> - else
> - consume_skb(skb);
> + if (tun_is_xdp_buff(ptr)) {
> + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
> +
> + ret = tun_put_user_xdp(tun, tfile, xdp, to);
> + put_page(virt_to_head_page(xdp->data));
> + } else {
> + struct sk_buff *skb = ptr;
> +
> + ret = tun_put_user(tun, tfile, skb, to);
> + if (unlikely(ret < 0))
> + kfree_skb(skb);
> + else
> + consume_skb(skb);
> + }
>
> return ret;
> }
> @@ -2143,12 +2277,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
> {
> struct tun_file *tfile = container_of(sock, struct tun_file, socket);
> struct tun_struct *tun = tun_get(tfile);
> - struct sk_buff *skb = m->msg_control;
> + void *ptr = m->msg_control;
> int ret;
>
> if (!tun) {
> ret = -EBADFD;
> - goto out_free_skb;
> + goto out_free;
> }
>
> if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
> @@ -2160,7 +2294,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
> SOL_PACKET, TUN_TX_TIMESTAMP);
> goto out;
> }
> - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
> + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
> if (ret > (ssize_t)total_len) {
> m->msg_flags |= MSG_TRUNC;
> ret = flags & MSG_TRUNC ? ret : total_len;
> @@ -2171,12 +2305,25 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
>
> out_put_tun:
> tun_put(tun);
> -out_free_skb:
> - if (skb)
> - kfree_skb(skb);
> +out_free:
> + tun_ptr_free(ptr);
> return ret;
> }
>
> +static int tun_ptr_peek_len(void *ptr)
> +{
> + if (likely(ptr)) {
> + if (tun_is_xdp_buff(ptr)) {
> + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
> +
> + return xdp->data_end - xdp->data;
> + }
> + return __skb_array_len_with_tag(ptr);
> + } else {
> + return 0;
> + }
> +}
> +
> static int tun_peek_len(struct socket *sock)
> {
> struct tun_file *tfile = container_of(sock, struct tun_file, socket);
> @@ -2187,7 +2334,7 @@ static int tun_peek_len(struct socket *sock)
> if (!tun)
> return 0;
>
> - ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, __skb_array_len_with_tag);
> + ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
> tun_put(tun);
>
> return ret;
> @@ -3110,7 +3257,7 @@ static int tun_queue_resize(struct tun_struct *tun)
>
> ret = ptr_ring_resize_multiple(rings, n,
> dev->tx_queue_len, GFP_KERNEL,
> - __skb_array_destroy_skb);
> + tun_ptr_free);
>
> kfree(rings);
> return ret;
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index c316555..a5a1db6 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -175,6 +175,17 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
> }
> }
>
> +static int vhost_net_buf_peek_len(void *ptr)
> +{
> + if (tun_is_xdp_buff(ptr)) {
> + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
> +
> + return xdp->data_end - xdp->data;
> + }
> +
> + return __skb_array_len_with_tag(ptr);
> +}
> +
> static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
> {
> struct vhost_net_buf *rxq = &nvq->rxq;
> @@ -186,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
> return 0;
>
> out:
> - return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
> + return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
> }
>
> static void vhost_net_buf_init(struct vhost_net_buf *rxq)
> diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
> index bdee9b8..08e6682 100644
> --- a/include/linux/if_tun.h
> +++ b/include/linux/if_tun.h
> @@ -17,9 +17,14 @@
>
> #include <uapi/linux/if_tun.h>
>
> +#define TUN_XDP_FLAG 0x1UL
> +
> #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
> struct socket *tun_get_socket(struct file *);
> struct ptr_ring *tun_get_tx_ring(struct file *file);
> +bool tun_is_xdp_buff(void *ptr);
> +void *tun_xdp_to_ptr(void *ptr);
> +void *tun_ptr_to_xdp(void *ptr);
> #else
> #include <linux/err.h>
> #include <linux/errno.h>
> @@ -33,5 +38,17 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
> {
> return ERR_PTR(-EINVAL);
> }
> +static inline bool tun_is_xdp_buff(void *ptr)
> +{
> + return false;
> +}
> +void *tun_xdp_to_ptr(void *ptr)
> +{
> + return NULL;
> +}
> +void *tun_ptr_to_xdp(void *ptr)
> +{
> + return NULL;
> +}
> #endif /* CONFIG_TUN */
> #endif /* __IF_TUN_H */
>

2018-01-11 02:35:22

by Jason Wang

[permalink] [raw]
Subject: Re: [PATCH net-next V2 2/2] tuntap: XDP transmission



On 2018年01月11日 00:11, Tariq Toukan wrote:
>> +EXPORT_SYMBOL(tun_ptr_to_xdp);
>> +
>
> Hi Jason,
> I started getting the following compilation issues.
>
> + make -j24 -s
> net/socket.o: In function `tun_xdp_to_ptr':
> /images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:46:
> multiple definition of `tun_xdp_to_ptr'
> fs/compat_ioctl.o:/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:46:
> first defined here
> net/socket.o: In function `tun_ptr_to_xdp':
> /images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:50:
> multiple definition of `tun_ptr_to_xdp'
> fs/compat_ioctl.o:/images/autom/buildbot/worker/merge-net-next/build/./include/linux/if_tun.h:50:
> first defined here
> make: *** [vmlinux] Error 1
>
> Seems you missed adding the following ifdef:
> #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
>
> Thanks,
> Tariq

My bad and sorry for that.

It should have been fixed by
https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1125b008711581a8962ee028e2982d7757093600

Thanks