Set sk->sk_max_ack_backlog earlier, in inet_dccp_listen() and inet_listen(),
so that the redundant assignment in inet_csk_listen_start() can be removed.
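
For reference, the resulting flow in inet_listen() after this patch,
condensed from the diff below (the inet_dccp_listen() path is analogous):

	old_state = sk->sk_state;
	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto out;

	/* Set once, up front, instead of both here and in
	 * inet_csk_listen_start().
	 */
	sk->sk_max_ack_backlog = backlog;

	if (old_state != TCP_LISTEN) {
		err = inet_csk_listen_start(sk, backlog);
		if (err)
			goto out;
		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
	}
	err = 0;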
Signed-off-by: Yafang Shao <[email protected]>
---
net/dccp/proto.c | 2 +-
net/ipv4/af_inet.c | 2 +-
net/ipv4/inet_connection_sock.c | 1 -
3 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 43733ac..658cd32b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -948,6 +948,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
goto out;
+ sk->sk_max_ack_backlog = backlog;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
@@ -960,7 +961,6 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (err)
goto out;
}
- sk->sk_max_ack_backlog = backlog;
err = 0;
out:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1fbe2f8..39066cd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -208,6 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
+ sk->sk_max_ack_backlog = backlog;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
@@ -231,7 +232,6 @@ int inet_listen(struct socket *sock, int backlog)
goto out;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
}
- sk->sk_max_ack_backlog = backlog;
err = 0;
out:
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 15e7f79..860e22a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -874,7 +874,6 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
reqsk_queue_alloc(&icsk->icsk_accept_queue);
- sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
--
1.8.3.1
A bitwise test is a little faster.
So replace after(ack, prior_snd_una) with (flag & FLAG_SND_UNA_ADVANCED), as
this flag has already been set earlier in tcp_ack().
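
For context, tcp_ack() has already performed the very same comparison a
few lines above and cached the result in flag, so testing the flag avoids
repeating it (a condensed sketch; surrounding code trimmed):

	/* earlier in tcp_ack() */
	if (after(ack, prior_snd_una)) {
		flag |= FLAG_SND_UNA_ADVANCED;
		...
	}

	/* after(seq2, seq1) is before(seq1, seq2) in include/net/tcp.h,
	 * i.e. (s32)(seq1 - seq2) < 0: a subtraction plus a sign test,
	 * versus a single AND on the already-computed flag.
	 */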
Cc: Joe Perches <[email protected]>
Signed-off-by: Yafang Shao <[email protected]>
---
net/ipv4/tcp_input.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2868ef2..0167015 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3610,7 +3610,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (flag & FLAG_UPDATE_TS_RECENT)
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
- if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ if (!(flag & FLAG_SLOWPATH) && (flag & FLAG_SND_UNA_ADVANCED)) {
/* Window is constant, pure forward advance.
* No more checks are required.
* Note, we use the fact that SND.UNA>=SND.WL2.
--
1.8.3.1
On 11/07/2018 03:20 AM, Yafang Shao wrote:
> A bitwise test is a little faster.
> So replace after(ack, prior_snd_una) with (flag & FLAG_SND_UNA_ADVANCED), as
> this flag has already been set earlier in tcp_ack().
>
> Cc: Joe Perches <[email protected]>
> Signed-off-by: Yafang Shao <[email protected]>
> ---
> net/ipv4/tcp_input.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index 2868ef2..0167015 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -3610,7 +3610,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
> if (flag & FLAG_UPDATE_TS_RECENT)
> tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
>
> - if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
> + if (!(flag & FLAG_SLOWPATH) && (flag & FLAG_SND_UNA_ADVANCED)) {
> /* Window is constant, pure forward advance.
> * No more checks are required.
> * Note, we use the fact that SND.UNA>=SND.WL2.
>
What about reducing this to a single conditional jump?
if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) == FLAG_SND_UNA_ADVANCED) {
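
The two forms are indeed equivalent: the masked compare is true exactly
when FLAG_SND_UNA_ADVANCED is set and FLAG_SLOWPATH is clear, but it
typically compiles to one mask, one compare and one branch. A throwaway
userspace check (flag values matching tcp_input.c of this era; any two
distinct bits behave the same):

	#include <stdio.h>

	#define FLAG_SLOWPATH		0x100
	#define FLAG_SND_UNA_ADVANCED	0x400

	int main(void)
	{
		unsigned int flag;

		for (flag = 0; flag <= 0xfff; flag++) {
			int two = !(flag & FLAG_SLOWPATH) &&
				  (flag & FLAG_SND_UNA_ADVANCED);
			int one = (flag & (FLAG_SLOWPATH |
					   FLAG_SND_UNA_ADVANCED)) ==
				  FLAG_SND_UNA_ADVANCED;

			if (two != one) {
				printf("mismatch at 0x%x\n", flag);
				return 1;
			}
		}
		printf("both forms agree for all %u flag values\n", flag);
		return 0;
	}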
On 11/07/2018 03:20 AM, Yafang Shao wrote:
> Set sk->sk_max_ack_backlog earlier, in inet_dccp_listen() and inet_listen(),
> so that the redundant assignment in inet_csk_listen_start() can be removed.
>
> Signed-off-by: Yafang Shao <[email protected]>
> ---
>
Reviewed-by: Eric Dumazet <[email protected]>
On Wed, Nov 7, 2018 at 11:16 PM Eric Dumazet <[email protected]> wrote:
>
>
>
> On 11/07/2018 03:20 AM, Yafang Shao wrote:
> > A bitwise test is a little faster.
>
>
> > So replace after(ack, prior_snd_una) with (flag & FLAG_SND_UNA_ADVANCED), as
> > this flag has already been set earlier in tcp_ack().
> >
> > Cc: Joe Perches <[email protected]>
> > Signed-off-by: Yafang Shao <[email protected]>
> > ---
> > net/ipv4/tcp_input.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> > index 2868ef2..0167015 100644
> > --- a/net/ipv4/tcp_input.c
> > +++ b/net/ipv4/tcp_input.c
> > @@ -3610,7 +3610,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
> > if (flag & FLAG_UPDATE_TS_RECENT)
> > tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
> >
> > - if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
> > + if (!(flag & FLAG_SLOWPATH) && (flag & FLAG_SND_UNA_ADVANCED)) {
> > /* Window is constant, pure forward advance.
> > * No more checks are required.
> > * Note, we use the fact that SND.UNA>=SND.WL2.
> >
>
> What about reducing this to a single conditional jump?
>
> if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) == FLAG_SND_UNA_ADVANCED) {
>
That's better.
Will change it.
Thanks
Yafang
From: Yafang Shao <[email protected]>
Date: Wed, 7 Nov 2018 19:20:16 +0800
> Set sk->sk_max_ack_backlog earlier, in inet_dccp_listen() and inet_listen(),
> so that the redundant assignment in inet_csk_listen_start() can be removed.
>
> Signed-off-by: Yafang Shao <[email protected]>
Applied.