Date: Wed, 10 Feb 2021 16:30:09 +0000
To: "David S. Miller", Jakub Kicinski
Miller" , Jakub Kicinski From: Alexander Lobakin Cc: Jonathan Lemon , Eric Dumazet , Dmitry Vyukov , Willem de Bruijn , Alexander Lobakin , Randy Dunlap , Kevin Hao , Pablo Neira Ayuso , Jakub Sitnicki , Marco Elver , Dexuan Cui , Paolo Abeni , Jesper Dangaard Brouer , Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko , Taehee Yoo , Cong Wang , =?utf-8?Q?Bj=C3=B6rn_T=C3=B6pel?= , Miaohe Lin , Guillaume Nault , Yonghong Song , zhudi , Michal Kubecek , Marcelo Ricardo Leitner , Dmitry Safonov <0x7f454c46@gmail.com>, Yang Yingliang , Florian Westphal , Edward Cree , linux-kernel@vger.kernel.org, netdev@vger.kernel.org Reply-To: Alexander Lobakin Subject: [PATCH v4 net-next 07/11] skbuff: move NAPI cache declarations upper in the file Message-ID: <20210210162732.80467-8-alobakin@pm.me> In-Reply-To: <20210210162732.80467-1-alobakin@pm.me> References: <20210210162732.80467-1-alobakin@pm.me> MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: quoted-printable X-Spam-Status: No, score=-1.2 required=10.0 tests=ALL_TRUSTED,DKIM_SIGNED, DKIM_VALID,DKIM_VALID_AU,DKIM_VALID_EF shortcircuit=no autolearn=disabled version=3.4.4 X-Spam-Checker-Version: SpamAssassin 3.4.4 (2020-01-24) on mailout.protonmail.ch Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org NAPI cache structures will be used for allocating skbuff_heads, so move their declarations a bit upper. Signed-off-by: Alexander Lobakin --- net/core/skbuff.c | 90 +++++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4be2bb969535..860a9d4f752f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -119,6 +119,51 @@ static void skb_under_panic(struct sk_buff *skb, unsig= ned int sz, void *addr) =09skb_panic(skb, sz, addr, __func__); } =20 +#define NAPI_SKB_CACHE_SIZE=0964 + +struct napi_alloc_cache { +=09struct page_frag_cache page; +=09unsigned int skb_count; +=09void *skb_cache[NAPI_SKB_CACHE_SIZE]; +}; + +static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); +static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); + +static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask, +=09=09=09=09unsigned int align_mask) +{ +=09struct napi_alloc_cache *nc =3D this_cpu_ptr(&napi_alloc_cache); + +=09return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask); +} + +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask= ) +{ +=09fragsz =3D SKB_DATA_ALIGN(fragsz); + +=09return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); +} +EXPORT_SYMBOL(__napi_alloc_frag_align); + +void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_ma= sk) +{ +=09struct page_frag_cache *nc; +=09void *data; + +=09fragsz =3D SKB_DATA_ALIGN(fragsz); +=09if (in_irq() || irqs_disabled()) { +=09=09nc =3D this_cpu_ptr(&netdev_alloc_cache); +=09=09data =3D page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); +=09} else { +=09=09local_bh_disable(); +=09=09data =3D __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); +=09=09local_bh_enable(); +=09} +=09return data; +} +EXPORT_SYMBOL(__netdev_alloc_frag_align); + /* Caller must provide SKB that is memset cleared */ static void __build_skb_around(struct sk_buff *skb, void *data, =09=09=09 unsigned int frag_size) @@ -220,51 +265,6 @@ struct sk_buff *build_skb_around(struct sk_buff *skb, } EXPORT_SYMBOL(build_skb_around); =20 -#define NAPI_SKB_CACHE_SIZE=0964 - -struct napi_alloc_cache { -=09struct 
 net/core/skbuff.c | 90 +++++++++++++++++++++++------------------------
 1 file changed, 45 insertions(+), 45 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4be2bb969535..860a9d4f752f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -119,6 +119,51 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 	skb_panic(skb, sz, addr, __func__);
 }
 
+#define NAPI_SKB_CACHE_SIZE	64
+
+struct napi_alloc_cache {
+	struct page_frag_cache page;
+	unsigned int skb_count;
+	void *skb_cache[NAPI_SKB_CACHE_SIZE];
+};
+
+static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+				unsigned int align_mask)
+{
+	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
+}
+
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+{
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
+	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+}
+EXPORT_SYMBOL(__napi_alloc_frag_align);
+
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+{
+	struct page_frag_cache *nc;
+	void *data;
+
+	fragsz = SKB_DATA_ALIGN(fragsz);
+	if (in_irq() || irqs_disabled()) {
+		nc = this_cpu_ptr(&netdev_alloc_cache);
+		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+	} else {
+		local_bh_disable();
+		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+		local_bh_enable();
+	}
+	return data;
+}
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
+
 /* Caller must provide SKB that is memset cleared */
 static void __build_skb_around(struct sk_buff *skb, void *data,
 			       unsigned int frag_size)
@@ -220,51 +265,6 @@ struct sk_buff *build_skb_around(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(build_skb_around);
 
-#define NAPI_SKB_CACHE_SIZE	64
-
-struct napi_alloc_cache {
-	struct page_frag_cache page;
-	unsigned int skb_count;
-	void *skb_cache[NAPI_SKB_CACHE_SIZE];
-};
-
-static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
-				unsigned int align_mask)
-{
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
-}
-
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
-	fragsz = SKB_DATA_ALIGN(fragsz);
-
-	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
-}
-EXPORT_SYMBOL(__napi_alloc_frag_align);
-
-void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
-	struct page_frag_cache *nc;
-	void *data;
-
-	fragsz = SKB_DATA_ALIGN(fragsz);
-	if (in_irq() || irqs_disabled()) {
-		nc = this_cpu_ptr(&netdev_alloc_cache);
-		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
-	} else {
-		local_bh_disable();
-		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
-		local_bh_enable();
-	}
-	return data;
-}
-EXPORT_SYMBOL(__netdev_alloc_frag_align);
-
 /*
  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
  * the caller if emergency pfmemalloc reserves are being used. If it is and
-- 
2.30.1