Enable memcg-based memory accounting for the memory used by the bpf
ringbuffer: the ring buffer pages, the array of page pointers and the
bpf_ringbuf_map structure are now charged to the memory cgroup of the
process that creates the map.

Signed-off-by: Roman Gushchin <[email protected]>
---
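A note on the flags: GFP_KERNEL_ACCOUNT is defined as
GFP_KERNEL | __GFP_ACCOUNT, so any allocation made with it is charged to
the memory cgroup of the allocating task. vmalloc_node() hard-codes
GFP_KERNEL, which is why the page-array allocation is switched to
__vmalloc_node(), the variant that takes an explicit gfp mask. A minimal
sketch for illustration only ('size' below is a placeholder, not code
from this patch):

	/* illustration only: 'size' is a placeholder */
	void *acct  = kzalloc(size, GFP_KERNEL_ACCOUNT); /* charged to the task's memcg */
	void *plain = kzalloc(size, GFP_KERNEL);         /* not charged */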
kernel/bpf/ringbuf.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 002f8a5c9e51..e8e2c39cbdc9 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -60,8 +60,8 @@ struct bpf_ringbuf_hdr {

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
- const gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
- __GFP_ZERO;
+ const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN | __GFP_ZERO;
int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
int nr_data_pages = data_sz >> PAGE_SHIFT;
int nr_pages = nr_meta_pages + nr_data_pages;
@@ -89,7 +89,8 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
*/
array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
if (array_size > PAGE_SIZE)
- pages = vmalloc_node(array_size, numa_node);
+ pages = __vmalloc_node(array_size, 1, GFP_KERNEL_ACCOUNT,
+ numa_node, __builtin_return_address(0));
else
pages = kmalloc_node(array_size, flags, numa_node);
if (!pages)
@@ -167,7 +168,7 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
return ERR_PTR(-E2BIG);
#endif

-	rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
+ rb_map = kzalloc(sizeof(*rb_map), GFP_USER | __GFP_ACCOUNT);
if (!rb_map)
return ERR_PTR(-ENOMEM);
--
2.26.2

On Mon, Jul 27, 2020 at 12:22 PM Roman Gushchin <[email protected]> wrote:
>
> Enable the memcg-based memory accounting for the memory used by
> the bpf ringbuffer.
>
> Signed-off-by: Roman Gushchin <[email protected]>
Acked-by: Song Liu <[email protected]>