Do not use rlimit-based memory accounting for sockmap and sockhash maps.
It has been replaced with memcg-based memory accounting.
Signed-off-by: Roman Gushchin <[email protected]>
---
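Background note: with memcg-based accounting the charge happens at
allocation time, because the map memory is allocated with __GFP_ACCOUNT
(e.g. GFP_KERNEL_ACCOUNT) and is therefore charged to the memory cgroup
of the allocating task, and uncharged automatically when it is freed.
A minimal sketch of that pattern (illustration only, not part of this
patch; the helper name is hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical example: charging via __GFP_ACCOUNT instead of
 * pre-charging pages against RLIMIT_MEMLOCK.
 */
static void *example_map_area_alloc(size_t size, int numa_node)
{
	/* GFP_KERNEL_ACCOUNT = GFP_KERNEL | __GFP_ACCOUNT: the allocation
	 * is charged to the current task's memory cgroup.
	 */
	return kvmalloc_node(size, GFP_KERNEL_ACCOUNT, numa_node);
}

With that in place, the explicit bpf_map_charge_init() /
bpf_map_charge_finish() calls and the associated cost/overflow checks
in sock_map_alloc() and sock_hash_alloc() below are no longer needed.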
 net/core/sock_map.c | 33 ++++++---------------------------
 1 file changed, 6 insertions(+), 27 deletions(-)

diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index bc797adca44c..07c90baf8db1 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -26,8 +26,6 @@ struct bpf_stab {
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	u64 cost;
-	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -45,22 +43,15 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	bpf_map_init_from_attr(&stab->map, attr);
 	raw_spin_lock_init(&stab->lock);
 
-	/* Make sure page count doesn't overflow. */
-	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
-	err = bpf_map_charge_init(&stab->map.memory, cost);
-	if (err)
-		goto free_stab;
-
 	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
 				       sizeof(struct sock *),
 				       stab->map.numa_node);
-	if (stab->sks)
-		return &stab->map;
-	err = -ENOMEM;
-	bpf_map_charge_finish(&stab->map.memory);
-free_stab:
-	kfree(stab);
-	return ERR_PTR(err);
+	if (!stab->sks) {
+		kfree(stab);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return &stab->map;
 }
 
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -999,7 +990,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 {
 	struct bpf_shtab *htab;
 	int i, err;
-	u64 cost;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -1027,21 +1017,10 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		goto free_htab;
 	}
 
-	cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
-	       (u64) htab->elem_size * htab->map.max_entries;
-	if (cost >= U32_MAX - PAGE_SIZE) {
-		err = -EINVAL;
-		goto free_htab;
-	}
-	err = bpf_map_charge_init(&htab->map.memory, cost);
-	if (err)
-		goto free_htab;
-
 	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
 					   sizeof(struct bpf_shtab_bucket),
 					   htab->map.numa_node);
 	if (!htab->buckets) {
-		bpf_map_charge_finish(&htab->map.memory);
 		err = -ENOMEM;
 		goto free_htab;
 	}
--
2.26.2
On Mon, Jul 27, 2020 at 12:21 PM Roman Gushchin <[email protected]> wrote:
>
> Do not use rlimit-based memory accounting for sockmap and sockhash maps.
> It has been replaced with memcg-based memory accounting.
>
> Signed-off-by: Roman Gushchin <[email protected]>
Acked-by: Song Liu <[email protected]>