Use kmem_cache_zalloc() instead of kmem_cache_alloc() followed by memset().
Signed-off-by: Tian Tao <[email protected]>
---
net/core/skbuff.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c9a5a3c..3449c1c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -313,12 +313,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
- memset(skb, 0, offsetof(struct sk_buff, tail));
-
return __build_skb_around(skb, data, frag_size);
}
@@ -6170,12 +6168,10 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
*/
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
- struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
+ struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
- if (new) {
- memset(new->offset, 0, sizeof(new->offset));
+ if (new)
refcount_set(&new->refcnt, 1);
- }
return new;
}
--
2.7.4
On Wed, 2020-11-18 at 16:15 +0800, Tian Tao wrote:
> use kmem_cache_zalloc instead kmem_cache_alloc and memset.
[]
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
[]
> @@ -313,12 +313,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
> {
>  struct sk_buff *skb;
> 
> - skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
> + skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);
>  if (unlikely(!skb))
>  return NULL;
> 
> - memset(skb, 0, offsetof(struct sk_buff, tail));
> -
>  return __build_skb_around(skb, data, frag_size);
> }
> 
>
> @@ -6170,12 +6168,10 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
>   */
> struct skb_ext *__skb_ext_alloc(gfp_t flags)
> {
> - struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
> + struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
> 
> - if (new) {
> - memset(new->offset, 0, sizeof(new->offset));
> + if (new)
>  refcount_set(&new->refcnt, 1);
> - }
> 
>  return new;
> }
I think it'd be nicer to use the same form for both of these functions.
This could be:
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
struct skb_ext *new;
new = kmem_cache_zalloc(skbuff_ext_cache, flags);
if (unlikely(!new))
return NULL;
refcount_set(&new->refcnt, 1);
return new;
}
On Wed, 2020-11-18 at 16:15 +0800, Tian Tao wrote:
> use kmem_cache_zalloc instead kmem_cache_alloc and memset.
>
> Signed-off-by: Tian Tao <[email protected]>
> ---
> net/core/skbuff.c | 10 +++-------
> 1 file changed, 3 insertions(+), 7 deletions(-)
>
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index c9a5a3c..3449c1c 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -313,12 +313,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
> {
> struct sk_buff *skb;
>
> - skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
> + skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);
This will zero a slightly larger amount of data compared to the
existing code: offsetof(struct sk_buff, tail) == 182, sizeof(struct
sk_buff) == 224.
> if (unlikely(!skb))
> return NULL;
>
> - memset(skb, 0, offsetof(struct sk_buff, tail));
Additionally this leverages constant argument optimizations.
Possibly overall not noticeable, but this code path is quite critical
performance wise.
I would avoid the above.
> -
> return __build_skb_around(skb, data, frag_size);
> }
>
> @@ -6170,12 +6168,10 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
> */
> struct skb_ext *__skb_ext_alloc(gfp_t flags)
> {
> - struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
> + struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
>
> - if (new) {
> - memset(new->offset, 0, sizeof(new->offset));
Similar to the above, but additionally here the number of zeroed bytes
changes a lot and a few additional cachelines will be touched. The
performance impact is likely relevant.
Overall I think we should not do this.
Thanks,
Paolo