2023-07-01 06:54:21

by Arseniy Krasnov

[permalink] [raw]
Subject: [RFC PATCH v5 01/17] vsock/virtio: read data from non-linear skb

This is a preparation patch for non-linear skbuff handling. It replaces
direct calls of 'memcpy_to_msg()' with 'skb_copy_datagram_iter()'. The main
advantage of the second one is that it can handle the paged part of the skb
by using 'kmap()' on each page, but if there are no pages in the skb,
it behaves like simple copying to an iov iterator. This patch also adds a
new field to the control block of the skb - this value shows the current
offset in the skb from which to read the next portion of data (no matter
whether the skb is linear or not). The idea is that 'skb_copy_datagram_iter()'
handles both types of skb internally - it just needs an offset from which
to copy data from the given skb. This offset is incremented on each read
from the skb. This approach allows us to avoid special handling of
non-linear skbs:
1) We can't call 'skb_pull()' on it, because it updates 'data' pointer.
2) We need to update 'data_len' also on each read from this skb.

Signed-off-by: Arseniy Krasnov <[email protected]>
---
Changelog:
v4 -> v5:
* Use local variable for 'frag_off' in stream dequeue callback.
* R-b from Bobby Eshleman removed due to patch update.

include/linux/virtio_vsock.h | 1 +
net/vmw_vsock/virtio_transport_common.c | 30 ++++++++++++++++++-------
2 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..17dbb7176e37 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 frag_off;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index b769fc258931..e5683af23e60 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -355,7 +355,7 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
spin_lock_bh(&vvs->rx_lock);

skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
- off = 0;
+ off = VIRTIO_VSOCK_SKB_CB(skb)->frag_off;

if (total == len)
break;
@@ -370,7 +370,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);

- err = memcpy_to_msg(msg, skb->data + off, bytes);
+ err = skb_copy_datagram_iter(skb, off,
+ &msg->msg_iter,
+ bytes);
+
if (err)
goto out;

@@ -411,27 +414,35 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
}

while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
+ u32 skb_rest_len;
+
skb = skb_peek(&vvs->rx_queue);

bytes = len - total;
- if (bytes > skb->len)
- bytes = skb->len;
+ skb_rest_len = skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
+
+ if (bytes > skb_rest_len)
+ bytes = skb_rest_len;

/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);

- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb,
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off,
+ &msg->msg_iter, bytes);
+
if (err)
goto out;

spin_lock_bh(&vvs->rx_lock);

total += bytes;
- skb_pull(skb, bytes);

- if (skb->len == 0) {
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off += bytes;
+
+ if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->frag_off) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

virtio_transport_dec_rx_pkt(vvs, pkt_len);
@@ -503,7 +514,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);

- err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ err = skb_copy_datagram_iter(skb, 0,
+ &msg->msg_iter,
+ bytes_to_copy);
+
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
--
2.25.1



2023-07-06 17:29:19

by Stefano Garzarella

[permalink] [raw]
Subject: Re: [RFC PATCH v5 01/17] vsock/virtio: read data from non-linear skb

On Sat, Jul 01, 2023 at 09:39:31AM +0300, Arseniy Krasnov wrote:
>This is preparation patch for non-linear skbuff handling. It replaces
>direct calls of 'memcpy_to_msg()' with 'skb_copy_datagram_iter()'. Main
>advantage of the second one is that is can handle paged part of the skb

s/is that is/is that it/

>by using 'kmap()' on each page, but if there are no pages in the skb,
>it behaves like simple copying to iov iterator. This patch also adds
>new field to the control block of skb - this value shows current offset
>in the skb to read next portion of data (it doesn't matter linear it or
>not). Idea is that 'skb_copy_datagram_iter()' handles both types of
>skb internally - it just needs an offset from which to copy data from
>the given skb. This offset is incremented on each read from skb. This
>approach allows to avoid special handling of non-linear skbs:
>1) We can't call 'skb_pull()' on it, because it updates 'data' pointer.
>2) We need to update 'data_len' also on each read from this skb.

I would mention that this change is in preparation of zero-copy support.

>
>Signed-off-by: Arseniy Krasnov <[email protected]>
>---
> Changelog:
> v4 -> v5:
> * Use local variable for 'frag_off' in stream dequeue calback.
> * R-b from Bobby Eshleman removed due to patch update.
>
> include/linux/virtio_vsock.h | 1 +
> net/vmw_vsock/virtio_transport_common.c | 30 ++++++++++++++++++-------
> 2 files changed, 23 insertions(+), 8 deletions(-)
>
>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>index c58453699ee9..17dbb7176e37 100644
>--- a/include/linux/virtio_vsock.h
>+++ b/include/linux/virtio_vsock.h
>@@ -12,6 +12,7 @@
> struct virtio_vsock_skb_cb {
> bool reply;
> bool tap_delivered;
>+ u32 frag_off;
> };
>
> #define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index b769fc258931..e5683af23e60 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -355,7 +355,7 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
> spin_lock_bh(&vvs->rx_lock);
>
> skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
>- off = 0;
>+ off = VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
>
> if (total == len)
> break;
>@@ -370,7 +370,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
> */
> spin_unlock_bh(&vvs->rx_lock);
>
>- err = memcpy_to_msg(msg, skb->data + off, bytes);
>+ err = skb_copy_datagram_iter(skb, off,
>+ &msg->msg_iter,
>+ bytes);
>+
> if (err)
> goto out;
>
>@@ -411,27 +414,35 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
> }
>
> while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
>+ u32 skb_rest_len;
>+
> skb = skb_peek(&vvs->rx_queue);
>
> bytes = len - total;
>- if (bytes > skb->len)
>- bytes = skb->len;
>+ skb_rest_len = skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
>+
>+ if (bytes > skb_rest_len)
>+ bytes = skb_rest_len;

What about just:
bytes = min_t(size_t, len - total,
skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off);

The rest LGTM!

Stefano

>
> /* sk_lock is held by caller so no one else can dequeue.
> * Unlock rx_lock since memcpy_to_msg() may sleep.
> */
> spin_unlock_bh(&vvs->rx_lock);
>
>- err = memcpy_to_msg(msg, skb->data, bytes);
>+ err = skb_copy_datagram_iter(skb,
>+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off,
>+ &msg->msg_iter, bytes);
>+
> if (err)
> goto out;
>
> spin_lock_bh(&vvs->rx_lock);
>
> total += bytes;
>- skb_pull(skb, bytes);
>
>- if (skb->len == 0) {
>+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off += bytes;
>+
>+ if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->frag_off) {
> u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
>
> virtio_transport_dec_rx_pkt(vvs, pkt_len);
>@@ -503,7 +514,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
> */
> spin_unlock_bh(&vvs->rx_lock);
>
>- err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
>+ err = skb_copy_datagram_iter(skb, 0,
>+ &msg->msg_iter,
>+ bytes_to_copy);
>+
> if (err) {
> /* Copy of message failed. Rest of
> * fragments will be freed without copy.
>--
>2.25.1
>