2023-05-31 17:41:56

by Jon Kohler

Subject: [PATCH] flow_dissector: introduce skb_get_hash_symmetric()

tun.c changed from skb_get_hash() to __skb_get_hash_symmetric() in
commit feec084a7cf4 ("tun: use symmetric hash"), which adds overhead
in the OVS datapath, where ovs_dp_process_packet() has to calculate
the hash again because __skb_get_hash_symmetric() does not retain the
hash that it calculates.

Introduce skb_get_hash_symmetric(), which calculates and saves the hash
in one go so that the calculation work does not go to waste, and plumb
it into tun.c.

Fixes: feec084a7cf4 ("tun: use symmetric hash")
Signed-off-by: Jon Kohler <[email protected]>
CC: Jason Wang <[email protected]>
CC: David S. Miller <[email protected]>
---
drivers/net/tun.c | 8 ++++----
include/linux/skbuff.h | 1 +
net/core/flow_dissector.c | 29 +++++++++++++++++++++++++++++
3 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d75456adc62a..27e9be434593 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -528,7 +528,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)

numqueues = READ_ONCE(tun->numqueues);

- txq = __skb_get_hash_symmetric(skb);
+ txq = skb_get_hash_symmetric(skb);
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
if (e) {
tun_flow_save_rps_rxhash(e, txq);
@@ -1046,7 +1046,7 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
struct tun_flow_entry *e;
__u32 rxhash;

- rxhash = __skb_get_hash_symmetric(skb);
+ rxhash = skb_get_hash_symmetric(skb);
e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
if (e)
tun_flow_save_rps_rxhash(e, rxhash);
@@ -1933,7 +1933,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
- rxhash = __skb_get_hash_symmetric(skb);
+ rxhash = skb_get_hash_symmetric(skb);

rcu_read_lock();
if (unlikely(!(tun->dev->flags & IFF_UP))) {
@@ -2515,7 +2515,7 @@ static int tun_xdp_one(struct tun_struct *tun,

if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
- rxhash = __skb_get_hash_symmetric(skb);
+ rxhash = skb_get_hash_symmetric(skb);

if (tfile->napi_enabled) {
queue = &tfile->sk.sk_write_queue;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0b40417457cd..8112b1ab5735 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1474,6 +1474,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
+u32 skb_get_hash_symmetric(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 25fb0bbc310f..d8c0e804bbfe 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1747,6 +1747,35 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);

+/**
+ * skb_get_hash_symmetric: calculate and set a flow hash in @skb, using
+ * flow_keys_dissector_symmetric.
+ * @skb: sk_buff to calculate flow hash from
+ *
+ * This function is similar to __skb_get_hash_symmetric except that it
+ * retains the hash within the skb, such that it can be reused without
+ * being recalculated later.
+ */
+u32 skb_get_hash_symmetric(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ u32 hash;
+
+ __flow_hash_secret_init();
+
+ memset(&keys, 0, sizeof(keys));
+ __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+ &keys, NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+ hash = __flow_hash_from_keys(&keys, &hashrnd);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+
+ return hash;
+}
+EXPORT_SYMBOL_GPL(skb_get_hash_symmetric);
+
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
--
2.30.1 (Apple Git-130)



2023-05-31 18:03:48

by Jon Kohler

Subject: Re: [PATCH] flow_dissector: introduce skb_get_hash_symmetric()



> On May 31, 2023, at 1:33 PM, Eric Dumazet <[email protected]> wrote:
>
> On Wed, May 31, 2023 at 7:22 PM Jon Kohler <[email protected]> wrote:
>>
>> tun.c changed from skb_get_hash() to __skb_get_hash_symmetric() in
>> commit feec084a7cf4 ("tun: use symmetric hash"), which adds overhead
>> in the OVS datapath, where ovs_dp_process_packet() has to calculate
>> the hash again because __skb_get_hash_symmetric() does not retain the
>> hash that it calculates.
>>
>> Introduce skb_get_hash_symmetric(), which calculates and saves the hash
>> in one go so that the calculation work does not go to waste, and plumb
>> it into tun.c.
>>
>> Fixes: feec084a7cf4 ("tun: use symmetric hash")
>
>
>> Signed-off-by: Jon Kohler <[email protected]>
>> CC: Jason Wang <[email protected]>
>> CC: David S. Miller <[email protected]>
>> ---
>>
>
>> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
>> index 0b40417457cd..8112b1ab5735 100644
>> --- a/include/linux/skbuff.h
>> +++ b/include/linux/skbuff.h
>> @@ -1474,6 +1474,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
>>
>> void __skb_get_hash(struct sk_buff *skb);
>> u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
>> +u32 skb_get_hash_symmetric(struct sk_buff *skb);
>> u32 skb_get_poff(const struct sk_buff *skb);
>> u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
>> const struct flow_keys_basic *keys, int hlen);
>> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
>> index 25fb0bbc310f..d8c0e804bbfe 100644
>> --- a/net/core/flow_dissector.c
>> +++ b/net/core/flow_dissector.c
>> @@ -1747,6 +1747,35 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
>> }
>> EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
>>
>> +/**
>> + * skb_get_hash_symmetric: calculate and set a flow hash in @skb, using
>> + * flow_keys_dissector_symmetric.
>> + * @skb: sk_buff to calculate flow hash from
>> + *
>> + * This function is similar to __skb_get_hash_symmetric except that it
>> + * retains the hash within the skb, such that it can be reused without
>> + * being recalculated later.
>> + */
>> +u32 skb_get_hash_symmetric(struct sk_buff *skb)
>> +{
>> + struct flow_keys keys;
>> + u32 hash;
>> +
>> + __flow_hash_secret_init();
>> +
>> + memset(&keys, 0, sizeof(keys));
>> + __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
>> + &keys, NULL, 0, 0, 0,
>> + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
>> +
>> + hash = __flow_hash_from_keys(&keys, &hashrnd);
>> +
>> + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
>> +
>> + return hash;
>> +}
>> +EXPORT_SYMBOL_GPL(skb_get_hash_symmetric);
>> +
>
> Why copy/pasting __skb_get_hash_symmetric() ?
>
> Can you reuse it ?

Not directly, because calling __skb_set_sw_hash() with flow_keys_have_l4()
requires the struct flow_keys. __skb_get_hash_symmetric() does not take or
return that struct, so we’d either have to refactor it (and its callers) or
introduce yet another function and consolidate everything down to that new one.

I played around with exactly that idea by pulling the functional guts out of
__skb_get_hash_symmetric() into a new static helper and plumbing it into both
__skb_get_hash_symmetric() and this new skb_get_hash_symmetric(), but the LOC
churn was basically the same and it felt a bit worse than just a copy/paste.
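Roughly, what I tried looked like this (untested sketch; the helper name
__skb_get_hash_symmetric_keys is made up):

static u32 __skb_get_hash_symmetric_keys(const struct sk_buff *skb,
					 struct flow_keys *keys)
{
	__flow_hash_secret_init();

	/* dissect with the symmetric dissector, filling *keys */
	memset(keys, 0, sizeof(*keys));
	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
			   keys, NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, &hashrnd);
}

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	return __skb_get_hash_symmetric_keys(skb, &keys);
}

u32 skb_get_hash_symmetric(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash = __skb_get_hash_symmetric_keys(skb, &keys);

	/* save the hash so e.g. OVS does not have to recompute it */
	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));

	return hash;
}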

Alternatively, if it turned out that flow_keys_have_l4() wasn’t important, we
could simply pass false there and reuse __skb_get_hash_symmetric() in a
trivial manner. I couldn’t quite figure out whether the L4 flag was necessary,
so I went the safe (maybe?) route and copy/pasted instead.
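The trivial reuse would be just this (again untested, and only correct if
passing is_l4 = false is actually acceptable here):

u32 skb_get_hash_symmetric(struct sk_buff *skb)
{
	u32 hash = __skb_get_hash_symmetric(skb);

	/* assumes the L4 bit does not matter for these flows */
	__skb_set_sw_hash(skb, hash, false);

	return hash;
}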

Happy to take suggestions either way!

2023-05-31 18:10:38

by Eric Dumazet

Subject: Re: [PATCH] flow_dissector: introduce skb_get_hash_symmetric()

On Wed, May 31, 2023 at 7:22 PM Jon Kohler <[email protected]> wrote:
>
> tun.c changed from skb_get_hash() to __skb_get_hash_symmetric() in
> commit feec084a7cf4 ("tun: use symmetric hash"), which adds overhead
> in the OVS datapath, where ovs_dp_process_packet() has to calculate
> the hash again because __skb_get_hash_symmetric() does not retain the
> hash that it calculates.
>
> Introduce skb_get_hash_symmetric(), which calculates and saves the hash
> in one go so that the calculation work does not go to waste, and plumb
> it into tun.c.
>
> Fixes: feec084a7cf4 ("tun: use symmetric hash")


> Signed-off-by: Jon Kohler <[email protected]>
> CC: Jason Wang <[email protected]>
> CC: David S. Miller <[email protected]>
> ---
>

> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 0b40417457cd..8112b1ab5735 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -1474,6 +1474,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
>
> void __skb_get_hash(struct sk_buff *skb);
> u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
> +u32 skb_get_hash_symmetric(struct sk_buff *skb);
> u32 skb_get_poff(const struct sk_buff *skb);
> u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
> const struct flow_keys_basic *keys, int hlen);
> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
> index 25fb0bbc310f..d8c0e804bbfe 100644
> --- a/net/core/flow_dissector.c
> +++ b/net/core/flow_dissector.c
> @@ -1747,6 +1747,35 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
> }
> EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
>
> +/**
> + * skb_get_hash_symmetric: calculate and set a flow hash in @skb, using
> + * flow_keys_dissector_symmetric.
> + * @skb: sk_buff to calculate flow hash from
> + *
> + * This function is similar to __skb_get_hash_symmetric except that it
> + * retains the hash within the skb, such that it can be reused without
> + * being recalculated later.
> + */
> +u32 skb_get_hash_symmetric(struct sk_buff *skb)
> +{
> + struct flow_keys keys;
> + u32 hash;
> +
> + __flow_hash_secret_init();
> +
> + memset(&keys, 0, sizeof(keys));
> + __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
> + &keys, NULL, 0, 0, 0,
> + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
> +
> + hash = __flow_hash_from_keys(&keys, &hashrnd);
> +
> + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
> +
> + return hash;
> +}
> +EXPORT_SYMBOL_GPL(skb_get_hash_symmetric);
> +

Why copy/pasting __skb_get_hash_symmetric() ?

Can you reuse it ?

2023-05-31 18:31:13

by Jon Kohler

Subject: Re: [PATCH] flow_dissector: introduce skb_get_hash_symmetric()



> On May 31, 2023, at 2:00 PM, Eric Dumazet <[email protected]> wrote:
>
> On Wed, May 31, 2023 at 7:47 PM Jon Kohler <[email protected]> wrote:
>>
>>
>>
>>> On May 31, 2023, at 1:33 PM, Eric Dumazet <[email protected]> wrote:
>>>
>>> On Wed, May 31, 2023 at 7:22 PM Jon Kohler <[email protected]> wrote:
>>>>
>>>> tun.c changed from skb_get_hash() to __skb_get_hash_symmetric() in
>>>> commit feec084a7cf4 ("tun: use symmetric hash"), which adds overhead
>>>> in the OVS datapath, where ovs_dp_process_packet() has to calculate
>>>> the hash again because __skb_get_hash_symmetric() does not retain the
>>>> hash that it calculates.
>>>>
>>>> Introduce skb_get_hash_symmetric(), which calculates and saves the hash
>>>> in one go so that the calculation work does not go to waste, and plumb
>>>> it into tun.c.
>>>>
>>>> Fixes: feec084a7cf4 ("tun: use symmetric hash")
>>>
>>>
>>>> Signed-off-by: Jon Kohler <[email protected]>
>>>> CC: Jason Wang <[email protected]>
>>>> CC: David S. Miller <[email protected]>
>>>> ---
>>>>
>>>
>>>> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
>>>> index 0b40417457cd..8112b1ab5735 100644
>>>> --- a/include/linux/skbuff.h
>>>> +++ b/include/linux/skbuff.h
>>>> @@ -1474,6 +1474,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
>>>>
>>>> void __skb_get_hash(struct sk_buff *skb);
>>>> u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
>>>> +u32 skb_get_hash_symmetric(struct sk_buff *skb);
>>>> u32 skb_get_poff(const struct sk_buff *skb);
>>>> u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
>>>> const struct flow_keys_basic *keys, int hlen);
>>>> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
>>>> index 25fb0bbc310f..d8c0e804bbfe 100644
>>>> --- a/net/core/flow_dissector.c
>>>> +++ b/net/core/flow_dissector.c
>>>> @@ -1747,6 +1747,35 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
>>>> }
>>>> EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
>>>>
>>>> +/**
>>>> + * skb_get_hash_symmetric: calculate and set a flow hash in @skb, using
>>>> + * flow_keys_dissector_symmetric.
>>>> + * @skb: sk_buff to calculate flow hash from
>>>> + *
>>>> + * This function is similar to __skb_get_hash_symmetric except that it
>>>> + * retains the hash within the skb, such that it can be reused without
>>>> + * being recalculated later.
>>>> + */
>>>> +u32 skb_get_hash_symmetric(struct sk_buff *skb)
>>>> +{
>>>> + struct flow_keys keys;
>>>> + u32 hash;
>>>> +
>>>> + __flow_hash_secret_init();
>>>> +
>>>> + memset(&keys, 0, sizeof(keys));
>>>> + __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
>>>> + &keys, NULL, 0, 0, 0,
>>>> + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
>>>> +
>>>> + hash = __flow_hash_from_keys(&keys, &hashrnd);
>>>> +
>>>> + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
>>>> +
>>>> + return hash;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(skb_get_hash_symmetric);
>>>> +
>>>
>>> Why copy/pasting __skb_get_hash_symmetric() ?
>>>
>>> Can you reuse it ?
>>
>> Not directly, because calling __skb_set_sw_hash() with flow_keys_have_l4()
>> requires the struct flow_keys. __skb_get_hash_symmetric() does not take or
>> return that struct, so we’d either have to refactor it (and its callers) or
>> introduce yet another function and consolidate everything down to that new one.
>>
>> I played around with exactly that idea by pulling the functional guts out of
>> __skb_get_hash_symmetric() into a new static helper and plumbing it into both
>> __skb_get_hash_symmetric() and this new skb_get_hash_symmetric(), but the LOC
>> churn was basically the same and it felt a bit worse than just a copy/paste.
>>
>> Alternatively, if it turned out that flow_keys_have_l4() wasn’t important, we
>> could simply pass false there and reuse __skb_get_hash_symmetric() in a
>> trivial manner. I couldn’t quite figure out whether the L4 flag was necessary,
>> so I went the safe (maybe?) route and copy/pasted instead.
>>
>> Happy to take suggestions either way!
>
> There are 6 callers of __skb_get_hash_symmetric()
>
> I would convert __skb_get_hash_symmetric() to
>
> skb_get_hash_symmetric(struct sk_buff *skb, bool record_hash)

Ok, thank you for the suggestion. I’ll work that up as a v2 of this patch.

2023-05-31 18:42:06

by Eric Dumazet

Subject: Re: [PATCH] flow_dissector: introduce skb_get_hash_symmetric()

On Wed, May 31, 2023 at 7:47 PM Jon Kohler <[email protected]> wrote:
>
>
>
> > On May 31, 2023, at 1:33 PM, Eric Dumazet <[email protected]> wrote:
> >
> > On Wed, May 31, 2023 at 7:22 PM Jon Kohler <[email protected]> wrote:
> >>
> >> tun.c changed from skb_get_hash() to __skb_get_hash_symmetric() in
> >> commit feec084a7cf4 ("tun: use symmetric hash"), which adds overhead
> >> in the OVS datapath, where ovs_dp_process_packet() has to calculate
> >> the hash again because __skb_get_hash_symmetric() does not retain the
> >> hash that it calculates.
> >>
> >> Introduce skb_get_hash_symmetric(), which calculates and saves the hash
> >> in one go so that the calculation work does not go to waste, and plumb
> >> it into tun.c.
> >>
> >> Fixes: feec084a7cf4 ("tun: use symmetric hash")
> >
> >
> >> Signed-off-by: Jon Kohler <[email protected]>
> >> CC: Jason Wang <[email protected]>
> >> CC: David S. Miller <[email protected]>
> >> ---
> >>
> >
> >> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> >> index 0b40417457cd..8112b1ab5735 100644
> >> --- a/include/linux/skbuff.h
> >> +++ b/include/linux/skbuff.h
> >> @@ -1474,6 +1474,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
> >>
> >> void __skb_get_hash(struct sk_buff *skb);
> >> u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
> >> +u32 skb_get_hash_symmetric(struct sk_buff *skb);
> >> u32 skb_get_poff(const struct sk_buff *skb);
> >> u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
> >> const struct flow_keys_basic *keys, int hlen);
> >> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
> >> index 25fb0bbc310f..d8c0e804bbfe 100644
> >> --- a/net/core/flow_dissector.c
> >> +++ b/net/core/flow_dissector.c
> >> @@ -1747,6 +1747,35 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
> >> }
> >> EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
> >>
> >> +/**
> >> + * skb_get_hash_symmetric: calculate and set a flow hash in @skb, using
> >> + * flow_keys_dissector_symmetric.
> >> + * @skb: sk_buff to calculate flow hash from
> >> + *
> >> + * This function is similar to __skb_get_hash_symmetric except that it
> >> + * retains the hash within the skb, such that it can be reused without
> >> + * being recalculated later.
> >> + */
> >> +u32 skb_get_hash_symmetric(struct sk_buff *skb)
> >> +{
> >> + struct flow_keys keys;
> >> + u32 hash;
> >> +
> >> + __flow_hash_secret_init();
> >> +
> >> + memset(&keys, 0, sizeof(keys));
> >> + __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
> >> + &keys, NULL, 0, 0, 0,
> >> + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
> >> +
> >> + hash = __flow_hash_from_keys(&keys, &hashrnd);
> >> +
> >> + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
> >> +
> >> + return hash;
> >> +}
> >> +EXPORT_SYMBOL_GPL(skb_get_hash_symmetric);
> >> +
> >
> > Why copy/pasting __skb_get_hash_symmetric() ?
> >
> > Can you reuse it ?
>
> Not directly, because calling __skb_set_sw_hash() with flow_keys_have_l4()
> requires the struct flow_keys. __skb_get_hash_symmetric() does not take or
> return that struct, so we’d either have to refactor it (and its callers) or
> introduce yet another function and consolidate everything down to that new one.
>
> I played around with exactly that idea by pulling the functional guts out of
> __skb_get_hash_symmetric() into a new static helper and plumbing it into both
> __skb_get_hash_symmetric() and this new skb_get_hash_symmetric(), but the LOC
> churn was basically the same and it felt a bit worse than just a copy/paste.
>
> Alternatively, if it turned out that flow_keys_have_l4() wasn’t important, we
> could simply pass false there and reuse __skb_get_hash_symmetric() in a
> trivial manner. I couldn’t quite figure out whether the L4 flag was necessary,
> so I went the safe (maybe?) route and copy/pasted instead.
>
> Happy to take suggestions either way!

There are 6 callers of __skb_get_hash_symmetric()

I would convert __skb_get_hash_symmetric() to

skb_get_hash_symmetric(struct sk_buff *skb, bool record_hash)
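
i.e. something along these lines (rough, untested sketch), presumably with
the existing callers passing record_hash = false and tun.c passing true:

u32 skb_get_hash_symmetric(struct sk_buff *skb, bool record_hash)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
			   &keys, NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	hash = __flow_hash_from_keys(&keys, &hashrnd);

	/* optionally store the hash in the skb so it can be reused later */
	if (record_hash)
		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));

	return hash;
}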