V2:
Patch #2 includes some changes following Magnus' comments.
V3:
Regarding the function xskq_cons_present_entries, I think Daniel is right,
so I have modified it.
Xuan Zhuo (2):
xsk: replace datagram_poll by sock_poll_wait
xsk: change the tx writeable condition
net/xdp/xsk.c | 20 ++++++++++++++++----
net/xdp/xsk_queue.h | 6 ++++++
2 files changed, 22 insertions(+), 4 deletions(-)
--
1.8.3.1
Modify the tx writeable condition from "the queue is not full" to "the
number of entries present in the tx queue is less than half of the total
number of entries". Because the tx queue being not full lasts only a very
short time, the old condition generated a large number of EPOLLOUT events
and caused a large number of process wakeups.
Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Magnus Karlsson <[email protected]>
---
net/xdp/xsk.c | 16 +++++++++++++---
net/xdp/xsk_queue.h | 6 ++++++
2 files changed, 19 insertions(+), 3 deletions(-)
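As an aside (not part of the patch), a minimal userspace sketch of the kind
of epoll loop affected by this change; "xsk_fd" is assumed to be an
already-created and bound AF_XDP socket, and the function name is only for
illustration. Each wakeup of this loop corresponds to xsk_poll() reporting
EPOLLOUT, so a writeable condition that flips on every "ring not full"
transition wakes the process far more often than useful work is available.

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Block until the kernel reports the AF_XDP tx ring as writeable. */
static void wait_for_tx(int xsk_fd)
{
	struct epoll_event ev = { .events = EPOLLOUT, .data.fd = xsk_fd };
	struct epoll_event out;
	int epfd = epoll_create1(0);

	if (epfd < 0)
		return;

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, xsk_fd, &ev) == 0 &&
	    epoll_wait(epfd, &out, 1, -1) == 1 && (out.events & EPOLLOUT))
		/* This wakeup corresponds to xsk_poll() reporting EPOLLOUT. */
		printf("tx ring writeable, produce more descriptors\n");

	close(epfd);
}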
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9bbfd8a..6250447 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
return 0;
}
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+ if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+ return false;
+
+ return true;
+}
+
static bool xsk_is_bound(struct xdp_sock *xs)
{
if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
rcu_read_lock();
list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
__xskq_cons_release(xs->tx);
- xs->sk.sk_write_space(&xs->sk);
+ if (xsk_tx_writeable(xs))
+ xs->sk.sk_write_space(&xs->sk);
}
rcu_read_unlock();
}
@@ -436,7 +445,8 @@ static int xsk_generic_xmit(struct sock *sk)
out:
if (sent_frame)
- sk->sk_write_space(sk);
+ if (xsk_tx_writeable(xs))
+ sk->sk_write_space(sk);
mutex_unlock(&xs->mutex);
return err;
@@ -493,7 +503,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
- if (xs->tx && !xskq_cons_is_full(xs->tx))
+ if (xs->tx && xsk_tx_writeable(xs))
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cdb9cf3..9e71b9f 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -264,6 +264,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
q->nentries;
}
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+ /* No barriers needed since data is not accessed */
+ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
/* Functions for producers */
static inline bool xskq_prod_is_full(struct xsk_queue *q)
--
1.8.3.1
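As a side note (not part of the patch), the producer - consumer subtraction
in xskq_cons_present_entries yields the number of outstanding entries even
after the free-running u32 producer counter wraps around; a small standalone
illustration of that unsigned arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Free-running u32 counters; the producer has wrapped past UINT32_MAX. */
	uint32_t producer = 3;
	uint32_t consumer = UINT32_MAX - 4;

	/* Unsigned wraparound: 3 - (UINT32_MAX - 4) == 8 outstanding entries. */
	printf("present entries = %u\n", producer - consumer);
	return 0;
}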
On Tue, Dec 1, 2020 at 2:59 PM Xuan Zhuo <[email protected]> wrote:
>
> Modify the tx writeable condition from "the queue is not full" to "the
> number of entries present in the tx queue is less than half of the total
> number of entries". Because the tx queue being not full lasts only a very
> short time, the old condition generated a large number of EPOLLOUT events
> and caused a large number of process wakeups.
And the Fixes tag here should be:
Fixes: 35fcde7f8deb ("xsk: support for Tx")
> Signed-off-by: Xuan Zhuo <[email protected]>
> Acked-by: Magnus Karlsson <[email protected]>
> [...]
Hello:
This series was applied to bpf/bpf.git (refs/heads/master):
On Tue, 1 Dec 2020 21:56:56 +0800 you wrote:
> V2:
> Patch #2 includes some changes following Magnus' comments.
>
> V3:
> Regarding the function xskq_cons_present_entries, I think Daniel is right,
> so I have modified it.
>
> [...]
Here is the summary with links:
- [bpf,V3,1/2] xsk: replace datagram_poll by sock_poll_wait
https://git.kernel.org/bpf/bpf/c/f5da54187e33
- [bpf,V3,2/2] xsk: change the tx writeable condition
https://git.kernel.org/bpf/bpf/c/3413f04141aa
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html