2020-12-04 11:40:33

by Florent Revest

Subject: [PATCH bpf-next v5 1/6] net: Remove the err argument from sock_from_file

Currently, the sock_from_file prototype takes an "err" pointer that is
either not set or set to -ENOTSOCK IFF the returned socket is NULL. This
makes the error redundant and it is ignored by a few callers.

This patch simplifies the API by letting callers deduce the error based
on whether the returned socket is NULL or not.
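
To illustrate the new contract (a minimal sketch of a hypothetical
caller, not a hunk from this patch; the real conversions are in the
diff below):

	struct socket *sock;

	sock = sock_from_file(file);
	if (!sock)
		return -ENOTSOCK;	/* error now deduced from the NULL return */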

Suggested-by: Al Viro <[email protected]>
Signed-off-by: Florent Revest <[email protected]>
Reviewed-by: KP Singh <[email protected]>
---
fs/eventpoll.c | 3 +--
fs/io_uring.c | 16 ++++++++--------
include/linux/net.h | 2 +-
net/core/netclassid_cgroup.c | 3 +--
net/core/netprio_cgroup.c | 3 +--
net/core/sock.c | 8 +-------
net/socket.c | 27 ++++++++++++++++-----------
7 files changed, 29 insertions(+), 33 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 73c346e503d7..19499b7bb82c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -416,12 +416,11 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
- int err;

if (!net_busy_loop_on())
return;

- sock = sock_from_file(epi->ffd.file, &err);
+ sock = sock_from_file(epi->ffd.file);
if (!sock)
return;

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8018c7076b25..ace99b15cbd3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4341,9 +4341,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret;

- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;

if (req->async_data) {
kmsg = req->async_data;
@@ -4390,9 +4390,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret;

- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;

ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
if (unlikely(ret))
@@ -4569,9 +4569,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret, cflags = 0;

- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;

if (req->async_data) {
kmsg = req->async_data;
@@ -4632,9 +4632,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret, cflags = 0;

- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;

if (req->flags & REQ_F_BUFFER_SELECT) {
kbuf = io_recv_buffer_select(req, !force_nonblock);
diff --git a/include/linux/net.h b/include/linux/net.h
index 0dcd51feef02..9e2324efc26a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -240,7 +240,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);

diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 41b24cd31562..b49c57d35a88 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -68,9 +68,8 @@ struct update_classid_context {

static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
- int err;
struct update_classid_context *ctx = (void *)v;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);

if (sock) {
spin_lock(&cgroup_sk_update_lock);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9bd4cab7d510..99a431c56f23 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -220,8 +220,7 @@ static ssize_t write_priomap(struct kernfs_open_file *of,

static int update_netprio(const void *v, struct file *file, unsigned n)
{
- int err;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
diff --git a/net/core/sock.c b/net/core/sock.c
index d422a6808405..eb55cf79bb24 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2827,14 +2827,8 @@ EXPORT_SYMBOL(sock_no_mmap);
void __receive_sock(struct file *file)
{
struct socket *sock;
- int error;

- /*
- * The resulting value of "error" is ignored here since we only
- * need to take action when the file is a socket and testing
- * "sock" for NULL is sufficient.
- */
- sock = sock_from_file(file, &error);
+ sock = sock_from_file(file);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
diff --git a/net/socket.c b/net/socket.c
index 6e6cccc2104f..c799d9652a2c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -445,17 +445,15 @@ static int sock_map_fd(struct socket *sock, int flags)
/**
* sock_from_file - Return the &socket bounded to @file.
* @file: file
- * @err: pointer to an error code return
*
- * On failure returns %NULL and assigns -ENOTSOCK to @err.
+ * On failure returns %NULL.
*/

-struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */

- *err = -ENOTSOCK;
return NULL;
}
EXPORT_SYMBOL(sock_from_file);
@@ -484,9 +482,11 @@ struct socket *sockfd_lookup(int fd, int *err)
return NULL;
}

- sock = sock_from_file(file, err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ *err = -ENOTSOCK;
fput(file);
+ }
return sock;
}
EXPORT_SYMBOL(sockfd_lookup);
@@ -498,11 +498,12 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)

*err = -EBADF;
if (f.file) {
- sock = sock_from_file(f.file, err);
+ sock = sock_from_file(f.file);
if (likely(sock)) {
*fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
+ *err = -ENOTSOCK;
fdput(f);
}
return NULL;
@@ -1715,9 +1716,11 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }

err = -ENFILE;
newsock = sock_alloc();
@@ -1840,9 +1843,11 @@ int __sys_connect_file(struct file *file, struct sockaddr_storage *address,
struct socket *sock;
int err;

- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }

err =
security_socket_connect(sock, (struct sockaddr *)address, addrlen);
--
2.29.2.576.ga3fc446d84-goog


2020-12-04 11:40:35

by Florent Revest

Subject: [PATCH bpf-next v5 3/6] bpf: Expose bpf_sk_storage_* to iterator programs

Iterators are currently used to expose kernel information to userspace
over fast procfs-like files but iterators could also be used to
manipulate local storage. For example, the task_file iterator could be
used to initialize a socket local storage with associations between
processes and sockets or to selectively delete local storage values.
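
As a minimal sketch of what this change enables, a BPF_TRACE_ITER
program can now call the socket local storage helpers directly. This
mirrors the selftest programs added later in the series; the program
name is arbitrary and sk_stg_map is assumed to be declared as in those
selftests:

	SEC("iter/bpf_sk_storage_map")
	int clear_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
	{
		/* Delete the storage value of every socket in sk_stg_map. */
		if (ctx->sk)
			bpf_sk_storage_delete(&sk_stg_map, ctx->sk);

		return 0;
	}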

Signed-off-by: Florent Revest <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Acked-by: KP Singh <[email protected]>
---
net/core/bpf_sk_storage.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a32037daa933..4edd033e899c 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -394,6 +394,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
* use the bpf_sk_storage_(get|delete) helper.
*/
switch (prog->expected_attach_type) {
+ case BPF_TRACE_ITER:
case BPF_TRACE_RAW_TP:
/* bpf_sk_storage has no trace point */
return true;
--
2.29.2.576.ga3fc446d84-goog

2020-12-04 11:41:14

by Florent Revest

Subject: [PATCH bpf-next v5 4/6] selftests/bpf: Add an iterator selftest for bpf_sk_storage_delete

The eBPF program iterates over all entries (well, only one) of a socket
local storage map and deletes them all. The test makes sure that the
entry is indeed deleted.

Signed-off-by: Florent Revest <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
---
.../selftests/bpf/prog_tests/bpf_iter.c | 64 +++++++++++++++++++
.../progs/bpf_iter_bpf_sk_storage_helpers.c | 23 +++++++
2 files changed, 87 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 448885b95eed..bb4a638f2e6f 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -20,6 +20,7 @@
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
+#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
@@ -913,6 +914,67 @@ static void test_bpf_percpu_array_map(void)
bpf_iter_bpf_percpu_array_map__destroy(skel);
}

+/* An iterator program deletes all local storage in a map. */
+static void test_bpf_sk_storage_delete(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ struct bpf_iter_bpf_sk_storage_helpers *skel;
+ union bpf_iter_link_info linfo;
+ int err, len, map_fd, iter_fd;
+ struct bpf_link *link;
+ int sock_fd = -1;
+ __u32 val = 42;
+ char buf[64];
+
+ skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
+ if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
+ "skeleton open_and_load failed\n"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.sk_stg_map);
+
+ sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ goto out;
+ err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
+ if (CHECK(err, "map_update", "map_update failed\n"))
+ goto out;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = map_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
+ &opts);
+ if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ goto out;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ goto free_link;
+
+ /* do some tests */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ goto close_iter;
+
+ /* test results */
+ err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
+ if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
+ "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
+ goto close_iter;
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+out:
+ if (sock_fd >= 0)
+ close(sock_fd);
+ bpf_iter_bpf_sk_storage_helpers__destroy(skel);
+}
+
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1067,6 +1129,8 @@ void test_bpf_iter(void)
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
+ if (test__start_subtest("bpf_sk_storage_delete"))
+ test_bpf_sk_storage_delete();
if (test__start_subtest("rdonly-buf-out-of-bound"))
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
new file mode 100644
index 000000000000..01ff3235e413
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Google LLC. */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_stg_map SEC(".maps");
+
+SEC("iter/bpf_sk_storage_map")
+int delete_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ if (ctx->sk)
+ bpf_sk_storage_delete(&sk_stg_map, ctx->sk);
+
+ return 0;
+}
--
2.29.2.576.ga3fc446d84-goog

2020-12-04 11:41:16

by Florent Revest

Subject: [PATCH bpf-next v5 5/6] selftests/bpf: Add an iterator selftest for bpf_sk_storage_get

The eBPF program iterates over all files and tasks. For all socket
files, it stores the tgid of the last task it encountered with a handle
to that socket. This is a heuristic for finding the "owner" of a socket
similar to what's done by lsof, ss, netstat or fuser. Potentially, this
information could be used from a cgroup_skb/*gress hook to try to
associate network traffic with processes.

The test makes sure that a socket it created is tagged with prog_tests's
pid.
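
As a hedged illustration of the cgroup_skb idea mentioned above (the
section name, program name and map reuse are assumptions, and this
presumes the running kernel makes bpf_sk_storage_get() available to
cgroup_skb programs), the stored tgid could later be read per packet:

	SEC("cgroup_skb/egress")
	int log_egress_owner(struct __sk_buff *skb)
	{
		int *sock_tgid;

		if (!skb->sk)
			return 1;	/* no socket attached, let the packet through */

		sock_tgid = bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
		if (sock_tgid)
			bpf_printk("egress skb from tgid %d", *sock_tgid);

		return 1;	/* always allow the packet */
	}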

Signed-off-by: Florent Revest <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
---
.../selftests/bpf/prog_tests/bpf_iter.c | 40 +++++++++++++++++++
.../progs/bpf_iter_bpf_sk_storage_helpers.c | 24 +++++++++++
2 files changed, 64 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index bb4a638f2e6f..9336d0f18331 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -975,6 +975,44 @@ static void test_bpf_sk_storage_delete(void)
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

+/* This creates a socket and its local storage. It then runs a task_iter BPF
+ * program that replaces the existing socket local storage with the tgid of the
+ * only task owning a file descriptor to this socket, this process, prog_tests.
+ */
+static void test_bpf_sk_storage_get(void)
+{
+ struct bpf_iter_bpf_sk_storage_helpers *skel;
+ int err, map_fd, val = -1;
+ int sock_fd = -1;
+
+ skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
+ if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
+ "skeleton open_and_load failed\n"))
+ return;
+
+ sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.sk_stg_map);
+
+ err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
+ if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
+ goto close_socket;
+
+ do_dummy_read(skel->progs.fill_socket_owner);
+
+ err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
+ CHECK(err || val != getpid(), "bpf_map_lookup_elem",
+ "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
+ getpid(), val, err);
+
+close_socket:
+ close(sock_fd);
+out:
+ bpf_iter_bpf_sk_storage_helpers__destroy(skel);
+}
+
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1131,6 +1169,8 @@ void test_bpf_iter(void)
test_bpf_sk_storage_map();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
+ if (test__start_subtest("bpf_sk_storage_get"))
+ test_bpf_sk_storage_get();
if (test__start_subtest("rdonly-buf-out-of-bound"))
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
index 01ff3235e413..dde53df37de8 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -21,3 +21,27 @@ int delete_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)

return 0;
}
+
+SEC("iter/task_file")
+int fill_socket_owner(struct bpf_iter__task_file *ctx)
+{
+ struct task_struct *task = ctx->task;
+ struct file *file = ctx->file;
+ struct socket *sock;
+ int *sock_tgid;
+
+ if (!task || !file)
+ return 0;
+
+ sock = bpf_sock_from_file(file);
+ if (!sock)
+ return 0;
+
+ sock_tgid = bpf_sk_storage_get(&sk_stg_map, sock->sk, 0, 0);
+ if (!sock_tgid)
+ return 0;
+
+ *sock_tgid = task->tgid;
+
+ return 0;
+}
--
2.29.2.576.ga3fc446d84-goog

2020-12-04 11:41:42

by Florent Revest

Subject: [PATCH bpf-next v5 6/6] selftests/bpf: Test bpf_sk_storage_get in tcp iterators

This extends the existing bpf_sk_storage_get test where a socket is
created and tagged with its creator's pid by a task_file iterator.

A TCP iterator is now also used at the end of the test to negate the
values already stored in the local storage. The test therefore expects
-getpid() to be stored in the local storage.

Signed-off-by: Florent Revest <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
---
.../selftests/bpf/prog_tests/bpf_iter.c | 18 ++++++++++++++++--
.../progs/bpf_iter_bpf_sk_storage_helpers.c | 18 ++++++++++++++++++
2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 9336d0f18331..0e586368948d 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -978,6 +978,8 @@ static void test_bpf_sk_storage_delete(void)
/* This creates a socket and its local storage. It then runs a task_iter BPF
* program that replaces the existing socket local storage with the tgid of the
* only task owning a file descriptor to this socket, this process, prog_tests.
+ * It then runs a tcp socket iterator that negates the value in the existing
+ * socket local storage, the test verifies that the resulting value is -pid.
*/
static void test_bpf_sk_storage_get(void)
{
@@ -994,6 +996,10 @@ static void test_bpf_sk_storage_get(void)
if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
goto out;

+ err = listen(sock_fd, 1);
+ if (CHECK(err != 0, "listen", "errno: %d\n", errno))
+ goto close_socket;
+
map_fd = bpf_map__fd(skel->maps.sk_stg_map);

err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
@@ -1003,9 +1009,17 @@ static void test_bpf_sk_storage_get(void)
do_dummy_read(skel->progs.fill_socket_owner);

err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- CHECK(err || val != getpid(), "bpf_map_lookup_elem",
+ if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
+ "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
+ getpid(), val, err))
+ goto close_socket;
+
+ do_dummy_read(skel->progs.negate_socket_local_storage);
+
+ err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
+ CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
"map value wasn't set correctly (expected %d, got %d, err=%d)\n",
- getpid(), val, err);
+ -getpid(), val, err);

close_socket:
close(sock_fd);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
index dde53df37de8..6cecab2b32ba 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -45,3 +45,21 @@ int fill_socket_owner(struct bpf_iter__task_file *ctx)

return 0;
}
+
+SEC("iter/tcp")
+int negate_socket_local_storage(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ int *sock_tgid;
+
+ if (!sk_common)
+ return 0;
+
+ sock_tgid = bpf_sk_storage_get(&sk_stg_map, sk_common, 0, 0);
+ if (!sock_tgid)
+ return 0;
+
+ *sock_tgid = -*sock_tgid;
+
+ return 0;
+}
--
2.29.2.576.ga3fc446d84-goog

2020-12-04 22:03:12

by patchwork-bot+netdevbpf

Subject: Re: [PATCH bpf-next v5 1/6] net: Remove the err argument from sock_from_file

Hello:

This series was applied to bpf/bpf-next.git (refs/heads/master):

On Fri, 4 Dec 2020 12:36:04 +0100 you wrote:
> Currently, the sock_from_file prototype takes an "err" pointer that is
> either not set or set to -ENOTSOCK IFF the returned socket is NULL. This
> makes the error redundant and it is ignored by a few callers.
>
> This patch simplifies the API by letting callers deduce the error based
> on whether the returned socket is NULL or not.
>
> [...]

Here is the summary with links:
- [bpf-next,v5,1/6] net: Remove the err argument from sock_from_file
https://git.kernel.org/bpf/bpf-next/c/dba4a9256bb4
- [bpf-next,v5,2/6] bpf: Add a bpf_sock_from_file helper
https://git.kernel.org/bpf/bpf-next/c/4f19cab76136
- [bpf-next,v5,3/6] bpf: Expose bpf_sk_storage_* to iterator programs
https://git.kernel.org/bpf/bpf-next/c/a50a85e40c59
- [bpf-next,v5,4/6] selftests/bpf: Add an iterator selftest for bpf_sk_storage_delete
https://git.kernel.org/bpf/bpf-next/c/593f6d41abbb
- [bpf-next,v5,5/6] selftests/bpf: Add an iterator selftest for bpf_sk_storage_get
https://git.kernel.org/bpf/bpf-next/c/bd9b327e58f9
- [bpf-next,v5,6/6] selftests/bpf: Test bpf_sk_storage_get in tcp iterators
https://git.kernel.org/bpf/bpf-next/c/34da87213d3d

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html