2023-10-31 13:49:25

by Vadim Fedorenko

[permalink] [raw]
Subject: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

Add crypto API support to BPF to be able to decrypt or encrypt packets
in TC/XDP BPF programs. Only symmetric key ciphers are supported for
now. Special care should be taken for the initialization part of the
crypto algo, because crypto_alloc_sync_skcipher() doesn't work with
preemption disabled; it can be run only in a sleepable BPF program.
Async crypto is also not supported because of the very same issue -
TC/XDP BPF programs are not sleepable.

Signed-off-by: Vadim Fedorenko <[email protected]>
---
v2 -> v3:
- fix kdoc issues
v1 -> v2:
- use kmalloc in sleepable func, suggested by Alexei
- use __bpf_dynptr_is_rdonly() to check destination, suggested by Jakub
- use __bpf_dynptr_data_ptr() for all dynptr accesses

include/linux/bpf.h | 2 +
kernel/bpf/Makefile | 3 +
kernel/bpf/crypto.c | 280 ++++++++++++++++++++++++++++++++++++++++++
kernel/bpf/helpers.c | 4 +-
kernel/bpf/verifier.c | 1 +
5 files changed, 288 insertions(+), 2 deletions(-)
create mode 100644 kernel/bpf/crypto.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d3c51a507508..5ad1e83394b3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1222,6 +1222,8 @@ enum bpf_dynptr_type {

int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f526b7573e97..e14b5834c477 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -41,6 +41,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
+ifeq ($(CONFIG_CRYPTO_SKCIPHER),y)
+obj-$(CONFIG_BPF_SYSCALL) += crypto.o
+endif
obj-$(CONFIG_BPF_PRELOAD) += preload/

obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
new file mode 100644
index 000000000000..c2a0703934fc
--- /dev/null
+++ b/kernel/bpf/crypto.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Meta, Inc */
+#include <linux/bpf.h>
+#include <linux/bpf_mem_alloc.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <crypto/skcipher.h>
+
+/**
+ * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
+ * @tfm: The pointer to crypto_sync_skcipher struct.
+ * @rcu: The RCU head used to free the crypto context with RCU safety.
+ * @usage: Object reference counter. When the refcount goes to 0, the
+ * context is freed with kfree() from an RCU callback, which provides
+ * RCU safety.
+ */
+struct bpf_crypto_skcipher_ctx {
+ struct crypto_sync_skcipher *tfm;
+ struct rcu_head rcu;
+ refcount_t usage;
+};
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global kfuncs as their definitions will be in BTF");
+
+static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
+{
+ enum bpf_dynptr_type type;
+
+ if (!ptr->data)
+ return NULL;
+
+ type = bpf_dynptr_get_type(ptr);
+
+ switch (type) {
+ case BPF_DYNPTR_TYPE_LOCAL:
+ case BPF_DYNPTR_TYPE_RINGBUF:
+ return ptr->data + ptr->offset;
+ case BPF_DYNPTR_TYPE_SKB:
+ return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
+ case BPF_DYNPTR_TYPE_XDP:
+ {
+ void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
+ if (!IS_ERR_OR_NULL(xdp_ptr))
+ return xdp_ptr;
+
+ return NULL;
+ }
+ default:
+ WARN_ONCE(true, "unknown dynptr type %d\n", type);
+ return NULL;
+ }
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
+ *
+ * Allocates a crypto context that can be used, acquired, and released by
+ * a BPF program. The crypto context returned by this function must either
+ * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
+ *
+ * bpf_crypto_skcipher_ctx_create() allocates memory with GFP_KERNEL and may
+ * sleep, hence it is sleepable-only. It may return NULL if no memory is available.
+ * @algo: bpf_dynptr which holds string representation of algorithm.
+ * @key: bpf_dynptr which holds cipher key to do crypto.
+ */
+__bpf_kfunc struct bpf_crypto_skcipher_ctx *
+bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
+ const struct bpf_dynptr_kern *pkey, int *err)
+{
+ struct bpf_crypto_skcipher_ctx *ctx;
+ char *algo;
+
+ if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ algo = __bpf_dynptr_data_ptr(palgo);
+
+ if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
+ *err = -EOPNOTSUPP;
+ return NULL;
+ }
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
+ if (IS_ERR(ctx->tfm)) {
+ *err = PTR_ERR(ctx->tfm);
+ ctx->tfm = NULL;
+ goto err;
+ }
+
+ *err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
+ __bpf_dynptr_size(pkey));
+ if (*err)
+ goto err;
+
+ refcount_set(&ctx->usage, 1);
+
+ return ctx;
+err:
+ if (ctx->tfm)
+ crypto_free_sync_skcipher(ctx->tfm);
+ kfree(ctx);
+
+ return NULL;
+}
+
+static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
+{
+ struct bpf_crypto_skcipher_ctx *ctx;
+
+ ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
+ crypto_free_sync_skcipher(ctx->tfm);
+ kfree(ctx);
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
+ * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
+ * pointer.
+ *
+ * Acquires a reference to a BPF crypto context. The context returned by this function
+ * must either be embedded in a map as a kptr, or freed with
+ * bpf_crypto_skcipher_ctx_release().
+ */
+__bpf_kfunc struct bpf_crypto_skcipher_ctx *
+bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
+{
+ refcount_inc(&ctx->usage);
+ return ctx;
+}
+
+/**
+ * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
+ * @ctx: The crypto context being released.
+ *
+ * Releases a previously acquired reference to a BPF crypto context. When the final
+ * reference of the crypto context has been released, it is subsequently freed in
+ * an RCU callback.
+ */
+__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
+{
+ if (refcount_dec_and_test(&ctx->usage))
+ call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
+}
+
+static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ const struct bpf_dynptr_kern *iv,
+ bool decrypt)
+{
+ struct skcipher_request *req = NULL;
+ struct scatterlist sgin, sgout;
+ int err;
+
+ if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -EINVAL;
+
+ if (__bpf_dynptr_is_rdonly(dst))
+ return -EINVAL;
+
+ if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
+ return -EINVAL;
+
+ if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
+ return -EINVAL;
+
+ req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
+ sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
+
+ skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
+ __bpf_dynptr_data_ptr(iv));
+
+ err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
+
+ skcipher_request_free(req);
+
+ return err;
+}
+
+/**
+ * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
+ * @iv: bpf_dynptr to IV data to be used by decryptor.
+ *
+ * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ const struct bpf_dynptr_kern *iv)
+{
+ return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
+}
+
+/**
+ * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
+ * @iv: bpf_dynptr to IV data to be used by encryptor.
+ *
+ * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ const struct bpf_dynptr_kern *iv)
+{
+ return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
+}
+
+__diag_pop();
+
+BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &crypt_skcipher_init_kfunc_btf_ids,
+};
+
+BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
+BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
+BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &crypt_skcipher_kfunc_btf_ids,
+};
+
+BTF_ID_LIST(crypto_skcipher_dtor_ids)
+BTF_ID(struct, bpf_crypto_skcipher_ctx)
+BTF_ID(func, bpf_crypto_skcipher_ctx_release)
+
+static int __init crypto_skcipher_kfunc_init(void)
+{
+ int ret;
+ const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
+ {
+ .btf_id = crypto_skcipher_dtor_ids[0],
+ .kfunc_btf_id = crypto_skcipher_dtor_ids[1]
+ },
+ };
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
+ &crypt_skcipher_init_kfunc_set);
+ return ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
+ ARRAY_SIZE(crypto_skcipher_dtors),
+ THIS_MODULE);
+}
+
+late_initcall(crypto_skcipher_kfunc_init);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 62a53ebfedf9..2020884fca3d 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1429,7 +1429,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)

-static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
return ptr->size & DYNPTR_RDONLY_BIT;
}
@@ -1444,7 +1444,7 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

-static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
+enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb58987e4844..75d2f47ca3cb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5184,6 +5184,7 @@ BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(struct, cgroup)
BTF_ID(struct, bpf_cpumask)
BTF_ID(struct, task_struct)
+BTF_ID(struct, bpf_crypto_skcipher_ctx)
BTF_SET_END(rcu_protected_types)

static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
--
2.39.3


2023-10-31 13:49:25

by Vadim Fedorenko

[permalink] [raw]
Subject: [PATCH bpf-next v3 2/2] selftests: bpf: crypto skcipher algo selftests

Add simple tc hook selftests to show the way to work with new crypto
BPF API. Some weird structre and map are added to setup program to make
verifier happy about dynptr initialization from memory. Simple AES-ECB
algo is used to demonstrate encryption and decryption of fixed size
buffers.

Signed-off-by: Vadim Fedorenko <[email protected]>
---
v2 -> v3:
- disable tests on s390 and aarch64 because of unknown Fatal exception
in sg_init_one
v1 -> v2:
- add CONFIG_CRYPTO_AES and CONFIG_CRYPTO_ECB to selftest build config
suggested by Daniel

kernel/bpf/crypto.c | 5 +-
tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 +
tools/testing/selftests/bpf/DENYLIST.s390x | 1 +
tools/testing/selftests/bpf/config | 3 +
.../selftests/bpf/prog_tests/crypto_sanity.c | 129 +++++++++++++++
.../selftests/bpf/progs/crypto_common.h | 103 ++++++++++++
.../selftests/bpf/progs/crypto_sanity.c | 154 ++++++++++++++++++
7 files changed, 394 insertions(+), 2 deletions(-)
create mode 100644 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
create mode 100644 tools/testing/selftests/bpf/progs/crypto_common.h
create mode 100644 tools/testing/selftests/bpf/progs/crypto_sanity.c

diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
index c2a0703934fc..4ee6193165ca 100644
--- a/kernel/bpf/crypto.c
+++ b/kernel/bpf/crypto.c
@@ -65,8 +65,9 @@ static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
*
* bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
* allocator, and will not block. It may return NULL if no memory is available.
- * @algo: bpf_dynptr which holds string representation of algorithm.
- * @key: bpf_dynptr which holds cipher key to do crypto.
+ * @palgo: bpf_dynptr which holds string representation of algorithm.
+ * @pkey: bpf_dynptr which holds cipher key to do crypto.
+ * @err: integer to store error code when NULL is returned
*/
__bpf_kfunc struct bpf_crypto_skcipher_ctx *
bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 5c2cc7e8c5d0..72c7ef3e5872 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,5 +1,6 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+crypto_sanity # sg_init_one has exception on aarch64
exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # needs CONFIG_FPROBE
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 1a63996c0304..8ab7485bedb1 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,5 +1,6 @@
# TEMPORARY
# Alphabetical order
+crypto_sanity # sg_init_one has exception on s390 (exceptions)
exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 02dd4409200e..48b570fd1752 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -14,6 +14,9 @@ CONFIG_CGROUP_BPF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_AES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
new file mode 100644
index 000000000000..a43969da6d15
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/in6.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "crypto_sanity.skel.h"
+
+#define NS_TEST "crypto_sanity_ns"
+#define IPV6_IFACE_ADDR "face::1"
+#define UDP_TEST_PORT 7777
+static const char plain_text[] = "stringtoencrypt0";
+static const char crypted_data[] = "\x5B\x59\x39\xEA\xD9\x7A\x2D\xAD\xA7\xE0\x43" \
+ "\x37\x8A\x77\x17\xB2";
+
+void test_crypto_sanity(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = crypted_data,
+ .data_size_in = sizeof(crypted_data),
+ .repeat = 1,
+ );
+ struct nstoken *nstoken = NULL;
+ struct crypto_sanity *skel;
+ struct sockaddr_in6 addr;
+ int sockfd, err, pfd;
+ socklen_t addrlen;
+
+ skel = crypto_sanity__open();
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.skb_crypto_setup, true);
+
+ SYS(fail, "ip netns add %s", NS_TEST);
+ SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
+ SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
+
+ err = crypto_sanity__load(skel);
+ if (!ASSERT_OK(err, "crypto_sanity__load"))
+ goto fail;
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ qdisc_hook.ifindex = if_nametoindex("lo");
+ if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
+ goto fail;
+
+ err = crypto_sanity__attach(skel);
+ if (!ASSERT_OK(err, "crypto_sanity__attach"))
+ goto fail;
+
+ pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
+ if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
+ goto fail;
+
+ err = bpf_prog_test_run_opts(pfd, &opts);
+ if (!ASSERT_OK(err, "skb_crypto_setup") ||
+ !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
+ goto fail;
+
+ if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
+ goto fail;
+
+ err = bpf_tc_hook_create(&qdisc_hook);
+ if (!ASSERT_OK(err, "create qdisc hook"))
+ goto fail;
+
+ addrlen = sizeof(addr);
+ err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
+ (void *)&addr, &addrlen);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto fail;
+
+ tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
+ if (!ASSERT_OK(err, "attach decrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
+ goto fail;
+ err = sendto(sockfd, crypted_data, 16, 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, 16, "decrypt send"))
+ goto fail;
+
+ bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
+ if (!ASSERT_OK(skel->bss->status, "decrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, plain_text, sizeof(plain_text), "decrypt"))
+ goto fail;
+
+ tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(err, "attach encrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
+ goto fail;
+ err = sendto(sockfd, plain_text, 16, 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, 16, "encrypt send"))
+ goto fail;
+
+ bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(skel->bss->status, "encrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, crypted_data, sizeof(crypted_data), "encrypt"))
+ goto fail;
+
+fail:
+ if (nstoken) {
+ bpf_tc_hook_destroy(&qdisc_hook);
+ close_netns(nstoken);
+ }
+ SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ crypto_sanity__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h b/tools/testing/selftests/bpf/progs/crypto_common.h
new file mode 100644
index 000000000000..584378bb6df8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_common.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CRYPTO_COMMON_H
+#define _CRYPTO_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(CTX) static struct bpf_crypto_skcipher_ctx __kptr * global_crypto_ctx;
+
+struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr *algo,
+ const struct bpf_dynptr *key,
+ int *err) __ksym;
+struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx) __ksym;
+void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx) __ksym;
+int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
+ const struct bpf_dynptr *src, struct bpf_dynptr *dst,
+ const struct bpf_dynptr *iv) __ksym;
+int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
+ const struct bpf_dynptr *src, struct bpf_dynptr *dst,
+ const struct bpf_dynptr *iv) __ksym;
+
+struct __crypto_skcipher_ctx_value {
+ struct bpf_crypto_skcipher_ctx __kptr * ctx;
+};
+
+struct crypto_conf_value {
+ __u8 algo[32];
+ __u32 algo_size;
+ __u8 key[32];
+ __u32 key_size;
+ __u8 iv[32];
+ __u32 iv_size;
+ __u8 dst[32];
+ __u32 dst_size;
+};
+
+struct array_conf_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct crypto_conf_value);
+ __uint(max_entries, 1);
+} __crypto_conf_map SEC(".maps");
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __crypto_skcipher_ctx_value);
+ __uint(max_entries, 1);
+} __crypto_skcipher_ctx_map SEC(".maps");
+
+static inline struct crypto_conf_value *crypto_conf_lookup(void)
+{
+ struct crypto_conf_value *v, local = {};
+ u32 key = 0;
+
+ v = bpf_map_lookup_elem(&__crypto_conf_map, &key);
+ if (v)
+ return v;
+
+ bpf_map_update_elem(&__crypto_conf_map, &key, &local, 0);
+ return bpf_map_lookup_elem(&__crypto_conf_map, &key);
+}
+
+static inline struct __crypto_skcipher_ctx_value *crypto_skcipher_ctx_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
+}
+
+static inline int crypto_skcipher_ctx_insert(struct bpf_crypto_skcipher_ctx *ctx)
+{
+ struct __crypto_skcipher_ctx_value local, *v;
+ long status;
+ struct bpf_crypto_skcipher_ctx *old;
+ u32 key = 0;
+
+ local.ctx = NULL;
+ status = bpf_map_update_elem(&__crypto_skcipher_ctx_map, &key, &local, 0);
+ if (status) {
+ bpf_crypto_skcipher_ctx_release(ctx);
+ return status;
+ }
+
+ v = bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
+ if (!v) {
+ bpf_crypto_skcipher_ctx_release(ctx);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->ctx, ctx);
+ if (old) {
+ bpf_crypto_skcipher_ctx_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CRYPTO_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
new file mode 100644
index 000000000000..71a172d8d2a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+#define UDP_TEST_PORT 7777
+unsigned char crypto_key[16] = "testtest12345678";
+char crypto_algo[9] = "ecb(aes)";
+char dst[32] = {};
+int status;
+
+static inline int skb_validate_test(const struct __sk_buff *skb)
+{
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ u32 offset;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return -1;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return -1;
+
+ if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
+ return -1;
+
+ offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
+ if (skb->len < offset + 16)
+ return -1;
+
+ return offset;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(skb_crypto_setup)
+{
+ struct crypto_conf_value *c;
+ struct bpf_dynptr algo, key;
+ int err = 0;
+
+ status = 0;
+
+ c = crypto_conf_lookup();
+ if (!c) {
+ status = -EINVAL;
+ return 0;
+ }
+
+ bpf_dynptr_from_mem(crypto_algo, sizeof(crypto_algo), 0, &algo);
+ bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
+ struct bpf_crypto_skcipher_ctx *cctx = bpf_crypto_skcipher_ctx_create(&algo, &key, &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_skcipher_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+
+ return 0;
+}
+
+SEC("tc")
+int decrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_skcipher_ctx_value *v;
+ struct bpf_crypto_skcipher_ctx *ctx;
+ struct bpf_dynptr psrc, pdst, iv;
+ int err;
+
+ err = skb_validate_test(skb);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_skcipher_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_adjust(&psrc, err, err + 16);
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ bpf_dynptr_from_mem(dst, 0, 0, &iv);
+
+ bpf_crypto_skcipher_decrypt(ctx, &psrc, &pdst, &iv);
+
+ status = 0;
+
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int encrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_skcipher_ctx_value *v;
+ struct bpf_crypto_skcipher_ctx *ctx;
+ struct bpf_dynptr psrc, pdst, iv;
+ int err;
+
+ status = 0;
+
+ err = skb_validate_test(skb);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_skcipher_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_adjust(&psrc, err, err + 16);
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ bpf_dynptr_from_mem(dst, 0, 0, &iv);
+
+ bpf_crypto_skcipher_encrypt(ctx, &psrc, &pdst, &iv);
+
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
--
2.39.3

2023-10-31 15:46:37

by Jiri Olsa

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On Tue, Oct 31, 2023 at 06:48:59AM -0700, Vadim Fedorenko wrote:

SNIP

> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
> new file mode 100644
> index 000000000000..c2a0703934fc
> --- /dev/null
> +++ b/kernel/bpf/crypto.c
> @@ -0,0 +1,280 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2023 Meta, Inc */
> +#include <linux/bpf.h>
> +#include <linux/bpf_mem_alloc.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> +#include <linux/filter.h>
> +#include <linux/scatterlist.h>
> +#include <linux/skbuff.h>
> +#include <crypto/skcipher.h>
> +
> +/**
> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
> + * @tfm: The pointer to crypto_sync_skcipher struct.
> + * @rcu: The RCU head used to free the crypto context with RCU safety.
> + * @usage: Object reference counter. When the refcount goes to 0, the
> + * memory is released back to the BPF allocator, which provides
> + * RCU safety.
> + */
> +struct bpf_crypto_skcipher_ctx {
> + struct crypto_sync_skcipher *tfm;
> + struct rcu_head rcu;
> + refcount_t usage;
> +};
> +
> +__diag_push();
> +__diag_ignore_all("-Wmissing-prototypes",
> + "Global kfuncs as their definitions will be in BTF");
> +

hi,
just a heads up.. there's [1] change adding macro for this, it's not
merged yet, but will probably get in soon

jirka

[1] https://lore.kernel.org/bpf/[email protected]/

> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> +{
> + enum bpf_dynptr_type type;
> +
> + if (!ptr->data)
> + return NULL;
> +
> + type = bpf_dynptr_get_type(ptr);
> +
> + switch (type) {
> + case BPF_DYNPTR_TYPE_LOCAL:
> + case BPF_DYNPTR_TYPE_RINGBUF:
> + return ptr->data + ptr->offset;
> + case BPF_DYNPTR_TYPE_SKB:
> + return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> + case BPF_DYNPTR_TYPE_XDP:
> + {
> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> + if (!IS_ERR_OR_NULL(xdp_ptr))
> + return xdp_ptr;
> +
> + return NULL;
> + }
> + default:
> + WARN_ONCE(true, "unknown dynptr type %d\n", type);
> + return NULL;
> + }
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
> + *
> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
> + * allocator, and will not block. It may return NULL if no memory is available.
> + * @algo: bpf_dynptr which holds string representation of algorithm.
> + * @key: bpf_dynptr which holds cipher key to do crypto.
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
> + const struct bpf_dynptr_kern *pkey, int *err)
> +{
> + struct bpf_crypto_skcipher_ctx *ctx;
> + char *algo;
> +
> + if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
> + *err = -EINVAL;
> + return NULL;
> + }
> +
> + algo = __bpf_dynptr_data_ptr(palgo);
> +
> + if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
> + *err = -EOPNOTSUPP;
> + return NULL;
> + }
> +
> + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> + if (!ctx) {
> + *err = -ENOMEM;
> + return NULL;
> + }
> +
> + memset(ctx, 0, sizeof(*ctx));
> +
> + ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
> + if (IS_ERR(ctx->tfm)) {
> + *err = PTR_ERR(ctx->tfm);
> + ctx->tfm = NULL;
> + goto err;
> + }
> +
> + *err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
> + __bpf_dynptr_size(pkey));
> + if (*err)
> + goto err;
> +
> + refcount_set(&ctx->usage, 1);
> +
> + return ctx;
> +err:
> + if (ctx->tfm)
> + crypto_free_sync_skcipher(ctx->tfm);
> + kfree(ctx);
> +
> + return NULL;
> +}
> +
> +static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
> +{
> + struct bpf_crypto_skcipher_ctx *ctx;
> +
> + ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
> + crypto_free_sync_skcipher(ctx->tfm);
> + kfree(ctx);
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
> + * pointer.
> + *
> + * Acquires a reference to a BPF crypto context. The context returned by this function
> + * must either be embedded in a map as a kptr, or freed with
> + * bpf_crypto_skcipher_ctx_release().
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
> +{
> + refcount_inc(&ctx->usage);
> + return ctx;
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
> + * @ctx: The crypto context being released.
> + *
> + * Releases a previously acquired reference to a BPF crypto context. When the
> + * final reference of the crypto context has been released, it is subsequently
> + * freed in an RCU callback.
> + */
> +__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
> +{
> + if (refcount_dec_and_test(&ctx->usage))
> + call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
> +}
> +
> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + const struct bpf_dynptr_kern *iv,
> + bool decrypt)
> +{
> + struct skcipher_request *req = NULL;
> + struct scatterlist sgin, sgout;
> + int err;
> +
> + if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
> + return -EINVAL;
> +
> + if (__bpf_dynptr_is_rdonly(dst))
> + return -EINVAL;
> +
> + if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
> + return -EINVAL;
> +
> + if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
> + return -EINVAL;
> +
> + req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
> + if (!req)
> + return -ENOMEM;
> +
> + sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
> + sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
> +
> + skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
> + __bpf_dynptr_data_ptr(iv));
> +
> + err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
> +
> + skcipher_request_free(req);
> +
> + return err;
> +}
> +
> +/**
> + * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
> + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
> + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
> + * @iv: bpf_dynptr to IV data to be used by decryptor.
> + *
> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + const struct bpf_dynptr_kern *iv)
> +{
> + return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
> +}
> +
> +/**
> + * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
> + * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
> + * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
> + * @iv: bpf_dynptr to IV data to be used by encryptor.
> + *
> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + const struct bpf_dynptr_kern *iv)
> +{
> + return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
> +}
> +
> +__diag_pop();
> +
> +BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
> +BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
> + .owner = THIS_MODULE,
> + .set = &crypt_skcipher_init_kfunc_btf_ids,
> +};
> +
> +BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
> +BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
> + .owner = THIS_MODULE,
> + .set = &crypt_skcipher_kfunc_btf_ids,
> +};
> +
> +BTF_ID_LIST(crypto_skcipher_dtor_ids)
> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
> +BTF_ID(func, bpf_crypto_skcipher_ctx_release)
> +
> +static int __init crypto_skcipher_kfunc_init(void)
> +{
> + int ret;
> + const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
> + {
> + .btf_id = crypto_skcipher_dtor_ids[0],
> + .kfunc_btf_id = crypto_skcipher_dtor_ids[1]
> + },
> + };
> +
> + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
> + &crypt_skcipher_init_kfunc_set);
> + return ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
> + ARRAY_SIZE(crypto_skcipher_dtors),
> + THIS_MODULE);
> +}
> +
> +late_initcall(crypto_skcipher_kfunc_init);
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 62a53ebfedf9..2020884fca3d 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -1429,7 +1429,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
> #define DYNPTR_SIZE_MASK 0xFFFFFF
> #define DYNPTR_RDONLY_BIT BIT(31)
>
> -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
> {
> return ptr->size & DYNPTR_RDONLY_BIT;
> }
> @@ -1444,7 +1444,7 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
> ptr->size |= type << DYNPTR_TYPE_SHIFT;
> }
>
> -static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
> +enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
> {
> return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
> }
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index bb58987e4844..75d2f47ca3cb 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -5184,6 +5184,7 @@ BTF_ID(struct, prog_test_ref_kfunc)
> BTF_ID(struct, cgroup)
> BTF_ID(struct, bpf_cpumask)
> BTF_ID(struct, task_struct)
> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
> BTF_SET_END(rcu_protected_types)
>
> static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
> --
> 2.39.3
>
>

2023-10-31 16:06:11

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 31/10/2023 15:46, Jiri Olsa wrote:
> On Tue, Oct 31, 2023 at 06:48:59AM -0700, Vadim Fedorenko wrote:
>
> SNIP
>
>> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
>> new file mode 100644
>> index 000000000000..c2a0703934fc
>> --- /dev/null
>> +++ b/kernel/bpf/crypto.c
>> @@ -0,0 +1,280 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/* Copyright (c) 2023 Meta, Inc */
>> +#include <linux/bpf.h>
>> +#include <linux/bpf_mem_alloc.h>
>> +#include <linux/btf.h>
>> +#include <linux/btf_ids.h>
>> +#include <linux/filter.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/skbuff.h>
>> +#include <crypto/skcipher.h>
>> +
>> +/**
>> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
>> + * @tfm: The pointer to crypto_sync_skcipher struct.
>> + * @rcu: The RCU head used to free the crypto context with RCU safety.
>> + * @usage: Object reference counter. When the refcount goes to 0, the
>> + * memory is released back to the BPF allocator, which provides
>> + * RCU safety.
>> + */
>> +struct bpf_crypto_skcipher_ctx {
>> + struct crypto_sync_skcipher *tfm;
>> + struct rcu_head rcu;
>> + refcount_t usage;
>> +};
>> +
>> +__diag_push();
>> +__diag_ignore_all("-Wmissing-prototypes",
>> + "Global kfuncs as their definitions will be in BTF");
>> +
>
> hi,
> just a heads up.. there's [1] change adding macro for this, it's not
> merged yet, but will probably get in soon
>
> jirka
>
> [1] https://lore.kernel.org/bpf/[email protected]/
>

Hi Jiri,

Thanks for the heads up, I'll adjust the patchset when the macro patch is merged.


>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>> +{
>> + enum bpf_dynptr_type type;
>> +
>> + if (!ptr->data)
>> + return NULL;
>> +
>> + type = bpf_dynptr_get_type(ptr);
>> +
>> + switch (type) {
>> + case BPF_DYNPTR_TYPE_LOCAL:
>> + case BPF_DYNPTR_TYPE_RINGBUF:
>> + return ptr->data + ptr->offset;
>> + case BPF_DYNPTR_TYPE_SKB:
>> + return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
>> + case BPF_DYNPTR_TYPE_XDP:
>> + {
>> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
>> + if (!IS_ERR_OR_NULL(xdp_ptr))
>> + return xdp_ptr;
>> +
>> + return NULL;
>> + }
>> + default:
>> + WARN_ONCE(true, "unknown dynptr type %d\n", type);
>> + return NULL;
>> + }
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and released by
>> + * a BPF program. The crypto context returned by this function must either
>> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
>> + *
>> + * bpf_crypto_skcipher_ctx_create() allocates memory with kmalloc(GFP_KERNEL)
>> + * and may sleep, so it must only be called from sleepable programs. It may
>> + * return NULL if no memory is available.
>> + * @algo: bpf_dynptr which holds string representation of algorithm.
>> + * @key: bpf_dynptr which holds cipher key to do crypto.
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
>> + const struct bpf_dynptr_kern *pkey, int *err)
>> +{
>> + struct bpf_crypto_skcipher_ctx *ctx;
>> + char *algo;
>> +
>> + if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
>> + *err = -EINVAL;
>> + return NULL;
>> + }
>> +
>> + algo = __bpf_dynptr_data_ptr(palgo);
>> +
>> + if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
>> + *err = -EOPNOTSUPP;
>> + return NULL;
>> + }
>> +
>> + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>> + if (!ctx) {
>> + *err = -ENOMEM;
>> + return NULL;
>> + }
>> +
>> + memset(ctx, 0, sizeof(*ctx));
>> +
>> + ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
>> + if (IS_ERR(ctx->tfm)) {
>> + *err = PTR_ERR(ctx->tfm);
>> + ctx->tfm = NULL;
>> + goto err;
>> + }
>> +
>> + *err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
>> + __bpf_dynptr_size(pkey));
>> + if (*err)
>> + goto err;
>> +
>> + refcount_set(&ctx->usage, 1);
>> +
>> + return ctx;
>> +err:
>> + if (ctx->tfm)
>> + crypto_free_sync_skcipher(ctx->tfm);
>> + kfree(ctx);
>> +
>> + return NULL;
>> +}
>> +
>> +static void crypto_free_sync_skcipher_cb(struct rcu_head *head)
>> +{
>> + struct bpf_crypto_skcipher_ctx *ctx;
>> +
>> + ctx = container_of(head, struct bpf_crypto_skcipher_ctx, rcu);
>> + crypto_free_sync_skcipher(ctx->tfm);
>> + kfree(ctx);
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_acquire() - Acquire a reference to a BPF crypto context.
>> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
>> + * pointer.
>> + *
>> + * Acquires a reference to a BPF crypto context. The context returned by this function
>> + * must either be embedded in a map as a kptr, or freed with
>> + * bpf_crypto_skcipher_ctx_release().
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx)
>> +{
>> + refcount_inc(&ctx->usage);
>> + return ctx;
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_release() - Release a previously acquired BPF crypto context.
>> + * @ctx: The crypto context being released.
>> + *
>> + * Releases a previously acquired reference to a BPF crypto context. When the
>> + * final reference of the crypto context has been released, it is subsequently
>> + * freed in an RCU callback.
>> + */
>> +__bpf_kfunc void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
>> +{
>> + if (refcount_dec_and_test(&ctx->usage))
>> + call_rcu(&ctx->rcu, crypto_free_sync_skcipher_cb);
>> +}
>> +
>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>> + const struct bpf_dynptr_kern *src,
>> + struct bpf_dynptr_kern *dst,
>> + const struct bpf_dynptr_kern *iv,
>> + bool decrypt)
>> +{
>> + struct skcipher_request *req = NULL;
>> + struct scatterlist sgin, sgout;
>> + int err;
>> +
>> + if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>> + return -EINVAL;
>> +
>> + if (__bpf_dynptr_is_rdonly(dst))
>> + return -EINVAL;
>> +
>> + if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>> + return -EINVAL;
>> +
>> + if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>> + return -EINVAL;
>> +
>> + req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>> + if (!req)
>> + return -ENOMEM;
>> +
>> + sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
>> + sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
>> +
>> + skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
>> + __bpf_dynptr_data_ptr(iv));
>> +
>> + err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);
>> +
>> + skcipher_request_free(req);
>> +
>> + return err;
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_decrypt() - Decrypt buffer using configured context and IV provided.
>> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
>> + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
>> + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
>> + * @iv: bpf_dynptr to IV data to be used by decryptor.
>> + *
>> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> + const struct bpf_dynptr_kern *src,
>> + struct bpf_dynptr_kern *dst,
>> + const struct bpf_dynptr_kern *iv)
>> +{
>> + return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, true);
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_encrypt() - Encrypt buffer using configured context and IV provided.
>> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
>> + * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
>> + * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
>> + * @iv: bpf_dynptr to IV data to be used by encryptor.
>> + *
>> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> + const struct bpf_dynptr_kern *src,
>> + struct bpf_dynptr_kern *dst,
>> + const struct bpf_dynptr_kern *iv)
>> +{
>> + return bpf_crypto_skcipher_crypt(ctx->tfm, src, dst, iv, false);
>> +}
>> +
>> +__diag_pop();
>> +
>> +BTF_SET8_START(crypt_skcipher_init_kfunc_btf_ids)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_release, KF_RELEASE)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
>> +BTF_SET8_END(crypt_skcipher_init_kfunc_btf_ids)
>> +
>> +static const struct btf_kfunc_id_set crypt_skcipher_init_kfunc_set = {
>> + .owner = THIS_MODULE,
>> + .set = &crypt_skcipher_init_kfunc_btf_ids,
>> +};
>> +
>> +BTF_SET8_START(crypt_skcipher_kfunc_btf_ids)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_decrypt, KF_RCU)
>> +BTF_ID_FLAGS(func, bpf_crypto_skcipher_encrypt, KF_RCU)
>> +BTF_SET8_END(crypt_skcipher_kfunc_btf_ids)
>> +
>> +static const struct btf_kfunc_id_set crypt_skcipher_kfunc_set = {
>> + .owner = THIS_MODULE,
>> + .set = &crypt_skcipher_kfunc_btf_ids,
>> +};
>> +
>> +BTF_ID_LIST(crypto_skcipher_dtor_ids)
>> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
>> +BTF_ID(func, bpf_crypto_skcipher_ctx_release)
>> +
>> +static int __init crypto_skcipher_kfunc_init(void)
>> +{
>> + int ret;
>> + const struct btf_id_dtor_kfunc crypto_skcipher_dtors[] = {
>> + {
>> + .btf_id = crypto_skcipher_dtor_ids[0],
>> + .kfunc_btf_id = crypto_skcipher_dtor_ids[1]
>> + },
>> + };
>> +
>> + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_skcipher_kfunc_set);
>> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_skcipher_kfunc_set);
>> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_skcipher_kfunc_set);
>> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
>> + &crypt_skcipher_init_kfunc_set);
>> + return ret ?: register_btf_id_dtor_kfuncs(crypto_skcipher_dtors,
>> + ARRAY_SIZE(crypto_skcipher_dtors),
>> + THIS_MODULE);
>> +}
>> +
>> +late_initcall(crypto_skcipher_kfunc_init);
>> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
>> index 62a53ebfedf9..2020884fca3d 100644
>> --- a/kernel/bpf/helpers.c
>> +++ b/kernel/bpf/helpers.c
>> @@ -1429,7 +1429,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
>> #define DYNPTR_SIZE_MASK 0xFFFFFF
>> #define DYNPTR_RDONLY_BIT BIT(31)
>>
>> -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
>> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
>> {
>> return ptr->size & DYNPTR_RDONLY_BIT;
>> }
>> @@ -1444,7 +1444,7 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
>> ptr->size |= type << DYNPTR_TYPE_SHIFT;
>> }
>>
>> -static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
>> +enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
>> {
>> return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
>> }
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index bb58987e4844..75d2f47ca3cb 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -5184,6 +5184,7 @@ BTF_ID(struct, prog_test_ref_kfunc)
>> BTF_ID(struct, cgroup)
>> BTF_ID(struct, bpf_cpumask)
>> BTF_ID(struct, task_struct)
>> +BTF_ID(struct, bpf_crypto_skcipher_ctx)
>> BTF_SET_END(rcu_protected_types)
>>
>> static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
>> --
>> 2.39.3
>>
>>

2023-11-01 21:56:00

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 10/31/23 6:48 AM, Vadim Fedorenko wrote:
> --- /dev/null
> +++ b/kernel/bpf/crypto.c
> @@ -0,0 +1,280 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2023 Meta, Inc */
> +#include <linux/bpf.h>
> +#include <linux/bpf_mem_alloc.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> +#include <linux/filter.h>
> +#include <linux/scatterlist.h>
> +#include <linux/skbuff.h>
> +#include <crypto/skcipher.h>
> +
> +/**
> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher context structure
> + * @tfm: The pointer to crypto_sync_skcipher struct.
> + * @rcu: The RCU head used to free the crypto context with RCU safety.
> + * @usage: Object reference counter. When the refcount goes to 0, the
> + * memory is released back to the BPF allocator, which provides
> + * RCU safety.
> + */
> +struct bpf_crypto_skcipher_ctx {
> + struct crypto_sync_skcipher *tfm;
> + struct rcu_head rcu;
> + refcount_t usage;
> +};
> +
> +__diag_push();
> +__diag_ignore_all("-Wmissing-prototypes",
> + "Global kfuncs as their definitions will be in BTF");
> +
> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> +{
> + enum bpf_dynptr_type type;
> +
> + if (!ptr->data)
> + return NULL;
> +
> + type = bpf_dynptr_get_type(ptr);
> +
> + switch (type) {
> + case BPF_DYNPTR_TYPE_LOCAL:
> + case BPF_DYNPTR_TYPE_RINGBUF:
> + return ptr->data + ptr->offset;
> + case BPF_DYNPTR_TYPE_SKB:
> + return skb_pointer_if_linear(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));
> + case BPF_DYNPTR_TYPE_XDP:
> + {
> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset, __bpf_dynptr_size(ptr));

I suspect what it is doing here (for skb and xdp in particular) is very similar
to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL, sz) will work.


> + if (!IS_ERR_OR_NULL(xdp_ptr))
> + return xdp_ptr;
> +
> + return NULL;
> + }
> + default:
> + WARN_ONCE(true, "unknown dynptr type %d\n", type);
> + return NULL;
> + }
> +}
> +
> +/**
> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_skcipher_ctx_release().
> + *
> + * bpf_crypto_skcipher_ctx_create() allocates memory with kmalloc(GFP_KERNEL)
> + * and may sleep, so it must only be called from sleepable programs. It may
> + * return NULL if no memory is available.
> + * @algo: bpf_dynptr which holds string representation of algorithm.
> + * @key: bpf_dynptr which holds cipher key to do crypto.
> + */
> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,

Song's patch on __const_str should help on the palgo (which is a string) also:
https://lore.kernel.org/bpf/[email protected]/

> + const struct bpf_dynptr_kern *pkey, int *err)
> +{
> + struct bpf_crypto_skcipher_ctx *ctx;
> + char *algo;
> +
> + if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
> + *err = -EINVAL;
> + return NULL;
> + }
> +
> + algo = __bpf_dynptr_data_ptr(palgo);
> +
> + if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER, CRYPTO_ALG_TYPE_MASK)) {
> + *err = -EOPNOTSUPP;
> + return NULL;
> + }
> +
> + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
> + if (!ctx) {
> + *err = -ENOMEM;
> + return NULL;
> + }
> +
> + memset(ctx, 0, sizeof(*ctx));

nit. kzalloc()

> +
> + ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
> + if (IS_ERR(ctx->tfm)) {
> + *err = PTR_ERR(ctx->tfm);
> + ctx->tfm = NULL;
> + goto err;
> + }
> +
> + *err = crypto_sync_skcipher_setkey(ctx->tfm, __bpf_dynptr_data_ptr(pkey),
> + __bpf_dynptr_size(pkey));
> + if (*err)
> + goto err;
> +
> + refcount_set(&ctx->usage, 1);
> +
> + return ctx;
> +err:
> + if (ctx->tfm)
> + crypto_free_sync_skcipher(ctx->tfm);
> + kfree(ctx);
> +
> + return NULL;
> +}

[ ... ]

> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + const struct bpf_dynptr_kern *iv,
> + bool decrypt)
> +{
> + struct skcipher_request *req = NULL;
> + struct scatterlist sgin, sgout;
> + int err;
> +
> + if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
> + return -EINVAL;
> +
> + if (__bpf_dynptr_is_rdonly(dst))
> + return -EINVAL;
> +
> + if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
> + return -EINVAL;
> +
> + if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
> + return -EINVAL;
> +
> + req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);

Doing alloc per packet may kill performance. Is it possible to optimize it
somehow? What is the usual size of the req (e.g. the example in the selftest)?

> + if (!req)
> + return -ENOMEM;
> +
> + sg_init_one(&sgin, __bpf_dynptr_data_ptr(src), __bpf_dynptr_size(src));
> + sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst), __bpf_dynptr_size(dst));
> +
> + skcipher_request_set_crypt(req, &sgin, &sgout, __bpf_dynptr_size(src),
> + __bpf_dynptr_data_ptr(iv));
> +
> + err = decrypt ? crypto_skcipher_decrypt(req) : crypto_skcipher_encrypt(req);

I am hitting this with the selftest in patch 2:

[ 39.806675] =============================
[ 39.807243] WARNING: suspicious RCU usage
[ 39.807825] 6.6.0-rc7-02091-g17e023652cc1 #336 Tainted: G O
[ 39.808798] -----------------------------
[ 39.809368] kernel/sched/core.c:10149 Illegal context switch in RCU-bh
read-side critical section!
[ 39.810589]
[ 39.810589] other info that might help us debug this:
[ 39.810589]
[ 39.811696]
[ 39.811696] rcu_scheduler_active = 2, debug_locks = 1
[ 39.812616] 2 locks held by test_progs/1769:
[ 39.813249] #0: ffffffff84dce140 (rcu_read_lock){....}-{1:3}, at:
ip6_finish_output2+0x625/0x1b70
[ 39.814539] #1: ffffffff84dce1a0 (rcu_read_lock_bh){....}-{1:3}, at:
__dev_queue_xmit+0x1df/0x2c40
[ 39.815813]
[ 39.815813] stack backtrace:
[ 39.816441] CPU: 1 PID: 1769 Comm: test_progs Tainted: G O
6.6.0-rc7-02091-g17e023652cc1 #336
[ 39.817774] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[ 39.819312] Call Trace:
[ 39.819655] <TASK>
[ 39.819967] dump_stack_lvl+0x130/0x1d0
[ 39.822669] dump_stack+0x14/0x20
[ 39.823136] lockdep_rcu_suspicious+0x220/0x350
[ 39.823777] __might_resched+0xe0/0x660
[ 39.827915] __might_sleep+0x89/0xf0
[ 39.828423] skcipher_walk_virt+0x55/0x120
[ 39.828990] crypto_ecb_decrypt+0x159/0x310
[ 39.833846] crypto_skcipher_decrypt+0xa0/0xd0
[ 39.834481] bpf_crypto_skcipher_crypt+0x29a/0x3c0
[ 39.837100] bpf_crypto_skcipher_decrypt+0x56/0x70
[ 39.837770] bpf_prog_fa576505ab32d186_decrypt_sanity+0x180/0x185
[ 39.838627] cls_bpf_classify+0x3b6/0xf80
[ 39.839807] tcf_classify+0x2f4/0x550

> +
> + skcipher_request_free(req);
> +
> + return err;
> +}
> +


2023-11-01 22:54:32

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 2/2] selftests: bpf: crypto skcipher algo selftests

On 10/31/23 6:49 AM, Vadim Fedorenko wrote:
> Add simple tc hook selftests to show the way to work with new crypto
> BPF API. Some weird structure and map are added to the setup program to make
> the verifier happy about dynptr initialization from memory. Simple AES-ECB
> algo is used to demonstrate encryption and decryption of fixed size
> buffers.
>
> Signed-off-by: Vadim Fedorenko <[email protected]>
> ---
> v2 -> v3:
> - disable tests on s390 and aarch64 because of unknown Fatal exception
> in sg_init_one
> v1 -> v2:
> - add CONFIG_CRYPTO_AES and CONFIG_CRYPTO_ECB to selftest build config
> suggested by Daniel
>
> kernel/bpf/crypto.c | 5 +-
> tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 +
> tools/testing/selftests/bpf/DENYLIST.s390x | 1 +
> tools/testing/selftests/bpf/config | 3 +
> .../selftests/bpf/prog_tests/crypto_sanity.c | 129 +++++++++++++++
> .../selftests/bpf/progs/crypto_common.h | 103 ++++++++++++
> .../selftests/bpf/progs/crypto_sanity.c | 154 ++++++++++++++++++
> 7 files changed, 394 insertions(+), 2 deletions(-)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> create mode 100644 tools/testing/selftests/bpf/progs/crypto_common.h
> create mode 100644 tools/testing/selftests/bpf/progs/crypto_sanity.c
>
> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
> index c2a0703934fc..4ee6193165ca 100644
> --- a/kernel/bpf/crypto.c
> +++ b/kernel/bpf/crypto.c
> @@ -65,8 +65,9 @@ static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> *
> * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
> * allocator, and will not block. It may return NULL if no memory is available.
> - * @algo: bpf_dynptr which holds string representation of algorithm.
> - * @key: bpf_dynptr which holds cipher key to do crypto.
> + * @palgo: bpf_dynptr which holds string representation of algorithm.
> + * @pkey: bpf_dynptr which holds cipher key to do crypto.
> + * @err: integer to store error code when NULL is returned
> */
> __bpf_kfunc struct bpf_crypto_skcipher_ctx *
> bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
> diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
> index 5c2cc7e8c5d0..72c7ef3e5872 100644
> --- a/tools/testing/selftests/bpf/DENYLIST.aarch64
> +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
> @@ -1,5 +1,6 @@
> bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
> bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
> +crypto_sanity # sg_init_one has exception on aarch64

What is the exception? Is arm64 just lacking the support?

> exceptions # JIT does not support calling kfunc bpf_throw: -524
> fexit_sleep # The test never returns. The remaining tests cannot start.
> kprobe_multi_bench_attach # needs CONFIG_FPROBE
> diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
> index 1a63996c0304..8ab7485bedb1 100644
> --- a/tools/testing/selftests/bpf/DENYLIST.s390x
> +++ b/tools/testing/selftests/bpf/DENYLIST.s390x
> @@ -1,5 +1,6 @@
> # TEMPORARY
> # Alphabetical order
> +crypto_sanity # sg_init_one has exception on s390 (exceptions)
> exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
> get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
> stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
> diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
> index 02dd4409200e..48b570fd1752 100644
> --- a/tools/testing/selftests/bpf/config
> +++ b/tools/testing/selftests/bpf/config
> @@ -14,6 +14,9 @@ CONFIG_CGROUP_BPF=y
> CONFIG_CRYPTO_HMAC=y
> CONFIG_CRYPTO_SHA256=y
> CONFIG_CRYPTO_USER_API_HASH=y
> +CONFIG_CRYPTO_SKCIPHER=y
> +CONFIG_CRYPTO_ECB=y
> +CONFIG_CRYPTO_AES=y
> CONFIG_DEBUG_INFO=y
> CONFIG_DEBUG_INFO_BTF=y
> CONFIG_DEBUG_INFO_DWARF4=y
> diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> new file mode 100644
> index 000000000000..a43969da6d15
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> @@ -0,0 +1,129 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

s/2022/2023/ :)

> +
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <net/if.h>
> +#include <linux/in6.h>
> +
> +#include "test_progs.h"
> +#include "network_helpers.h"
> +#include "crypto_sanity.skel.h"
> +
> +#define NS_TEST "crypto_sanity_ns"
> +#define IPV6_IFACE_ADDR "face::1"
> +#define UDP_TEST_PORT 7777
> +static const char plain_text[] = "stringtoencrypt0";
> +static const char crypted_data[] = "\x5B\x59\x39\xEA\xD9\x7A\x2D\xAD\xA7\xE0\x43" \
> + "\x37\x8A\x77\x17\xB2";
> +
> +void test_crypto_sanity(void)
> +{
> + LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
> + LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
> + LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
> + LIBBPF_OPTS(bpf_test_run_opts, opts,
> + .data_in = crypted_data,
> + .data_size_in = sizeof(crypted_data),

This should not be needed now because the test_run is on a tracing program.

> + .repeat = 1,
> + );
> + struct nstoken *nstoken = NULL;
> + struct crypto_sanity *skel;
> + struct sockaddr_in6 addr;
> + int sockfd, err, pfd;
> + socklen_t addrlen;
> +
> + skel = crypto_sanity__open();
> + if (!ASSERT_OK_PTR(skel, "skel open"))
> + return;
> +
> + bpf_program__set_autoload(skel->progs.skb_crypto_setup, true);

Remove the '?' from SEC("?fentry.s/bpf_fentry_test1") should save this
set_autoload call.

> +
> + SYS(fail, "ip netns add %s", NS_TEST);
> + SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
> + SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
> +
> + err = crypto_sanity__load(skel);
> + if (!ASSERT_OK(err, "crypto_sanity__load"))
> + goto fail;
> +
> + nstoken = open_netns(NS_TEST);
> + if (!ASSERT_OK_PTR(nstoken, "open_netns"))
> + goto fail;
> +
> + qdisc_hook.ifindex = if_nametoindex("lo");
> + if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
> + goto fail;
> +
> + err = crypto_sanity__attach(skel);
> + if (!ASSERT_OK(err, "crypto_sanity__attach"))
> + goto fail;
> +
> + pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
> + if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
> + goto fail;
> +
> + err = bpf_prog_test_run_opts(pfd, &opts);
> + if (!ASSERT_OK(err, "skb_crypto_setup") ||
> + !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
> + goto fail;
> +
> + if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
> + goto fail;
> +
> + err = bpf_tc_hook_create(&qdisc_hook);
> + if (!ASSERT_OK(err, "create qdisc hook"))
> + goto fail;
> +
> + addrlen = sizeof(addr);
> + err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
> + (void *)&addr, &addrlen);
> + if (!ASSERT_OK(err, "make_sockaddr"))
> + goto fail;
> +
> + tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
> + err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
> + if (!ASSERT_OK(err, "attach decrypt filter"))
> + goto fail;
> +
> + sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
> + if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
> + goto fail;
> + err = sendto(sockfd, crypted_data, 16, 0, (void *)&addr, addrlen);
> + close(sockfd);
> + if (!ASSERT_EQ(err, 16, "decrypt send"))
> + goto fail;
> +
> + bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
> + if (!ASSERT_OK(skel->bss->status, "decrypt status"))
> + goto fail;
> + if (!ASSERT_STRNEQ(skel->bss->dst, plain_text, sizeof(plain_text), "decrypt"))
> + goto fail;
> +
> + tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
> + err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
> + if (!ASSERT_OK(err, "attach encrypt filter"))
> + goto fail;
> +
> + sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
> + if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
> + goto fail;
> + err = sendto(sockfd, plain_text, 16, 0, (void *)&addr, addrlen);
> + close(sockfd);
> + if (!ASSERT_EQ(err, 16, "encrypt send"))
> + goto fail;
> +
> + bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
> + if (!ASSERT_OK(skel->bss->status, "encrypt status"))
> + goto fail;
> + if (!ASSERT_STRNEQ(skel->bss->dst, crypted_data, sizeof(crypted_data), "encrypt"))
> + goto fail;
> +
> +fail:
> + if (nstoken) {
> + bpf_tc_hook_destroy(&qdisc_hook);
> + close_netns(nstoken);
> + }
> + SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
> + crypto_sanity__destroy(skel);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h b/tools/testing/selftests/bpf/progs/crypto_common.h
> new file mode 100644
> index 000000000000..584378bb6df8
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/crypto_common.h
> @@ -0,0 +1,103 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#ifndef _CRYPTO_COMMON_H
> +#define _CRYPTO_COMMON_H
> +
> +#include "errno.h"
> +#include <stdbool.h>
> +
> +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
> +private(CTX) static struct bpf_crypto_skcipher_ctx __kptr * global_crypto_ctx;

Add a test to show how it is used?

> +
> +struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr *algo,
> + const struct bpf_dynptr *key,
> + int *err) __ksym;
> +struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_acquire(struct bpf_crypto_skcipher_ctx *ctx) __ksym;
> +void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx) __ksym;

Same for acquire and release kfunc. Add test cases.

btw, does it have use cases to store the same bpf_crypto_skcipher_ctx in
multiple maps?

> +int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
> + const struct bpf_dynptr *src, struct bpf_dynptr *dst,
> + const struct bpf_dynptr *iv) __ksym;
> +int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
> + const struct bpf_dynptr *src, struct bpf_dynptr *dst,
> + const struct bpf_dynptr *iv) __ksym;
> +
> +struct __crypto_skcipher_ctx_value {
> + struct bpf_crypto_skcipher_ctx __kptr * ctx;
> +};
> +
> +struct crypto_conf_value {
> + __u8 algo[32];
> + __u32 algo_size;
> + __u8 key[32];
> + __u32 key_size;
> + __u8 iv[32];
> + __u32 iv_size;
> + __u8 dst[32];
> + __u32 dst_size;
> +};
> +
> +struct array_conf_map {
> + __uint(type, BPF_MAP_TYPE_ARRAY);
> + __type(key, int);
> + __type(value, struct crypto_conf_value);
> + __uint(max_entries, 1);
> +} __crypto_conf_map SEC(".maps");
> +
> +struct array_map {
> + __uint(type, BPF_MAP_TYPE_ARRAY);
> + __type(key, int);
> + __type(value, struct __crypto_skcipher_ctx_value);
> + __uint(max_entries, 1);
> +} __crypto_skcipher_ctx_map SEC(".maps");
> +
> +static inline struct crypto_conf_value *crypto_conf_lookup(void)
> +{
> + struct crypto_conf_value *v, local = {};
> + u32 key = 0;
> +
> + v = bpf_map_lookup_elem(&__crypto_conf_map, &key);
> + if (v)
> + return v;
> +
> + bpf_map_update_elem(&__crypto_conf_map, &key, &local, 0);
> + return bpf_map_lookup_elem(&__crypto_conf_map, &key);

hmm... local is all 0, which becomes the map value. Where is it initialized?

> +}
> +
> +static inline struct __crypto_skcipher_ctx_value *crypto_skcipher_ctx_value_lookup(void)
> +{
> + u32 key = 0;
> +
> + return bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
> +}
> +
> +static inline int crypto_skcipher_ctx_insert(struct bpf_crypto_skcipher_ctx *ctx)
> +{
> + struct __crypto_skcipher_ctx_value local, *v;
> + long status;

nit. s/status/err/

> + struct bpf_crypto_skcipher_ctx *old;
> + u32 key = 0;
> +
> + local.ctx = NULL;
> + status = bpf_map_update_elem(&__crypto_skcipher_ctx_map, &key, &local, 0);
> + if (status) {
> + bpf_crypto_skcipher_ctx_release(ctx);
> + return status;
> + }
> +
> + v = bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
> + if (!v) {
> + bpf_crypto_skcipher_ctx_release(ctx);
> + return -ENOENT;
> + }
> +
> + old = bpf_kptr_xchg(&v->ctx, ctx);
> + if (old) {
> + bpf_crypto_skcipher_ctx_release(old);
> + return -EEXIST;
> + }
> +
> + return 0;
> +}
> +
> +#endif /* _CRYPTO_COMMON_H */
> diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
> new file mode 100644
> index 000000000000..71a172d8d2a2
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
> @@ -0,0 +1,154 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#include "vmlinux.h"
> +#include "bpf_tracing_net.h"
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_endian.h>
> +#include <bpf/bpf_tracing.h>
> +#include "bpf_misc.h"
> +#include "bpf_kfuncs.h"
> +#include "crypto_common.h"
> +
> +#define UDP_TEST_PORT 7777
> +unsigned char crypto_key[16] = "testtest12345678";
> +char crypto_algo[9] = "ecb(aes)";
> +char dst[32] = {};
> +int status;
> +
> +static inline int skb_validate_test(const struct __sk_buff *skb)
> +{
> + struct ipv6hdr ip6h;
> + struct udphdr udph;
> + u32 offset;
> +
> + if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
> + return -1;
> +
> + if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
> + return -1;
> +
> + if (ip6h.nexthdr != IPPROTO_UDP)
> + return -1;
> +
> + if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
> + return -1;
> +
> + if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
> + return -1;
> +
> + offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
> + if (skb->len < offset + 16)
> + return -1;
> +
> + return offset;
> +}
> +
> +SEC("?fentry.s/bpf_fentry_test1")

I wonder if others have a better idea how to do this. This is basically setting up
the algo and key, which requires calling a kfunc in sleepable context. Otherwise,
the tc's test_run would be a better fit.

> +int BPF_PROG(skb_crypto_setup)
> +{
> + struct crypto_conf_value *c;
> + struct bpf_dynptr algo, key;
> + int err = 0;
> +
> + status = 0;
> +
> + c = crypto_conf_lookup();

"c" is not used. Why is the lookup needed? Maybe properly set up the
__crypto_conf_map so the example is more complete.

> + if (!c) {
> + status = -EINVAL;
> + return 0;
> + }
> +
> + bpf_dynptr_from_mem(crypto_algo, sizeof(crypto_algo), 0, &algo);
> + bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
> + struct bpf_crypto_skcipher_ctx *cctx = bpf_crypto_skcipher_ctx_create(&algo, &key, &err);
> +
> + if (!cctx) {
> + status = err;
> + return 0;
> + }
> +
> + err = crypto_skcipher_ctx_insert(cctx);
> + if (err && err != -EEXIST)
> + status = err;
> +
> + return 0;
> +}
> +
> +SEC("tc")
> +int decrypt_sanity(struct __sk_buff *skb)
> +{
> + struct __crypto_skcipher_ctx_value *v;
> + struct bpf_crypto_skcipher_ctx *ctx;
> + struct bpf_dynptr psrc, pdst, iv;
> + int err;

nit. s/err/offset/. "err" is actually the offset for the common case.

btw, considering dynptr psrc has to be created from skb anyway, is it easier to
use bpf_dynptr_slice(psrc, 0, NULL, ETH_HLEN + sizeof(ip6h) + sizeof(udph)) in
the above skb_validate_test()?

> +
> + err = skb_validate_test(skb);
> + if (err < 0) {
> + status = err;
> + return TC_ACT_SHOT;
> + }
> +
> + v = crypto_skcipher_ctx_value_lookup();
> + if (!v) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + ctx = v->ctx;
> + if (!ctx) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + bpf_dynptr_from_skb(skb, 0, &psrc);
> + bpf_dynptr_adjust(&psrc, err, err + 16);
> + bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
> + bpf_dynptr_from_mem(dst, 0, 0, &iv);
> +
> + bpf_crypto_skcipher_decrypt(ctx, &psrc, &pdst, &iv);

check the decrypt return value before setting "status = 0;" below.

> +
> + status = 0;
> +
> + return TC_ACT_SHOT;
> +}
> +
> +SEC("tc")
> +int encrypt_sanity(struct __sk_buff *skb)
> +{
> + struct __crypto_skcipher_ctx_value *v;
> + struct bpf_crypto_skcipher_ctx *ctx;
> + struct bpf_dynptr psrc, pdst, iv;
> + int err;
> +
> + status = 0;
> +
> + err = skb_validate_test(skb);
> + if (err < 0) {
> + status = err;
> + return TC_ACT_SHOT;
> + }
> +
> + v = crypto_skcipher_ctx_value_lookup();
> + if (!v) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + ctx = v->ctx;
> + if (!ctx) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + bpf_dynptr_from_skb(skb, 0, &psrc);
> + bpf_dynptr_adjust(&psrc, err, err + 16);
> + bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
> + bpf_dynptr_from_mem(dst, 0, 0, &iv);
> +
> + bpf_crypto_skcipher_encrypt(ctx, &psrc, &pdst, &iv);

Same here. check return value.

> +
> + return TC_ACT_SHOT;
> +}
> +
> +char __license[] SEC("license") = "GPL";

2023-11-01 23:00:42

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>>> +                     const struct bpf_dynptr_kern *src,
>>> +                     struct bpf_dynptr_kern *dst,
>>> +                     const struct bpf_dynptr_kern *iv,
>>> +                     bool decrypt)
>>> +{
>>> +    struct skcipher_request *req = NULL;
>>> +    struct scatterlist sgin, sgout;
>>> +    int err;
>>> +
>>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>>> +        return -EINVAL;
>>> +
>>> +    if (__bpf_dynptr_is_rdonly(dst))
>>> +        return -EINVAL;
>>> +
>>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>>> +        return -EINVAL;
>>> +
>>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>>> +        return -EINVAL;
>>> +
>>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>>
>> Doing alloc per packet may kill performance. Is it possible to optimize it
>> somehow? What is the usual size of the req (e.g. the example in the selftest)?
>>
>
> In ktls code aead_request is allocated every time encryption is invoked, see
> tls_decrypt_sg(), apparently per skb. Doesn't look like a performance
> killer. For the selftest it's only sizeof(struct skcipher_request).

ktls is doing the en/decrypt on the userspace behalf to compensate the cost.

When this kfunc is used in xdp to decrypt a few bytes for each packet and then
XDP_TX out, this extra alloc will be quite noticeable. If the size is usually
small, can it be done in the stack memory?

2023-11-01 23:00:48

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 01/11/2023 21:49, Martin KaFai Lau wrote:
> On 10/31/23 6:48 AM, Vadim Fedorenko wrote:
>> --- /dev/null
>> +++ b/kernel/bpf/crypto.c
>> @@ -0,0 +1,280 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/* Copyright (c) 2023 Meta, Inc */
>> +#include <linux/bpf.h>
>> +#include <linux/bpf_mem_alloc.h>
>> +#include <linux/btf.h>
>> +#include <linux/btf_ids.h>
>> +#include <linux/filter.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/skbuff.h>
>> +#include <crypto/skcipher.h>
>> +
>> +/**
>> + * struct bpf_crypto_skcipher_ctx - refcounted BPF sync skcipher
>> context structure
>> + * @tfm:    The pointer to crypto_sync_skcipher struct.
>> + * @rcu:    The RCU head used to free the crypto context with RCU
>> safety.
>> + * @usage:    Object reference counter. When the refcount goes to 0, the
>> + *        memory is released back to the BPF allocator, which provides
>> + *        RCU safety.
>> + */
>> +struct bpf_crypto_skcipher_ctx {
>> +    struct crypto_sync_skcipher *tfm;
>> +    struct rcu_head rcu;
>> +    refcount_t usage;
>> +};
>> +
>> +__diag_push();
>> +__diag_ignore_all("-Wmissing-prototypes",
>> +          "Global kfuncs as their definitions will be in BTF");
>> +
>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>> +{
>> +    enum bpf_dynptr_type type;
>> +
>> +    if (!ptr->data)
>> +        return NULL;
>> +
>> +    type = bpf_dynptr_get_type(ptr);
>> +
>> +    switch (type) {
>> +    case BPF_DYNPTR_TYPE_LOCAL:
>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>> +        return ptr->data + ptr->offset;
>> +    case BPF_DYNPTR_TYPE_SKB:
>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
>> __bpf_dynptr_size(ptr));
>> +    case BPF_DYNPTR_TYPE_XDP:
>> +    {
>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>> __bpf_dynptr_size(ptr));
>
> I suspect what it is doing here (for skb and xdp in particular) is very
> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0,
> NULL, sz) will work.
>

Well, yes, it's a simplified version of bpf_dynptr_slice. The problem is
that bpf_dynptr_slice is a bpf_kfunc, which cannot be used in another
bpf_kfunc. Should I refactor the code to use it in both places? Like
create __bpf_dynptr_slice(), which will be the internal part of the bpf_kfunc?

>
>> +        if (!IS_ERR_OR_NULL(xdp_ptr))
>> +            return xdp_ptr;
>> +
>> +        return NULL;
>> +    }
>> +    default:
>> +        WARN_ONCE(true, "unknown dynptr type %d\n", type);
>> +        return NULL;
>> +    }
>> +}
>> +
>> +/**
>> + * bpf_crypto_skcipher_ctx_create() - Create a mutable BPF crypto
>> context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and
>> released by
>> + * a BPF program. The crypto context returned by this function must
>> either
>> + * be embedded in a map as a kptr, or freed with
>> bpf_crypto_skcipher_ctx_release().
>> + *
>> + * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF
>> memory
>> + * allocator, and will not block. It may return NULL if no memory is
>> available.
>> + * @algo: bpf_dynptr which holds string representation of algorithm.
>> + * @key:  bpf_dynptr which holds cipher key to do crypto.
>> + */
>> +__bpf_kfunc struct bpf_crypto_skcipher_ctx *
>> +bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
>
> Song's patch on __const_str should help on the palgo (which is a string)
> also:
> https://lore.kernel.org/bpf/[email protected]/
>

Got it, I'll update it.

>> +                   const struct bpf_dynptr_kern *pkey, int *err)
>> +{
>> +    struct bpf_crypto_skcipher_ctx *ctx;
>> +    char *algo;
>> +
>> +    if (__bpf_dynptr_size(palgo) > CRYPTO_MAX_ALG_NAME) {
>> +        *err = -EINVAL;
>> +        return NULL;
>> +    }
>> +
>> +    algo = __bpf_dynptr_data_ptr(palgo);
>> +
>> +    if (!crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_SKCIPHER,
>> CRYPTO_ALG_TYPE_MASK)) {
>> +        *err = -EOPNOTSUPP;
>> +        return NULL;
>> +    }
>> +
>> +    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>> +    if (!ctx) {
>> +        *err = -ENOMEM;
>> +        return NULL;
>> +    }
>> +
>> +    memset(ctx, 0, sizeof(*ctx));
>
> nit. kzalloc()
>
>> +
>> +    ctx->tfm = crypto_alloc_sync_skcipher(algo, 0, 0);
>> +    if (IS_ERR(ctx->tfm)) {
>> +        *err = PTR_ERR(ctx->tfm);
>> +        ctx->tfm = NULL;
>> +        goto err;
>> +    }
>> +
>> +    *err = crypto_sync_skcipher_setkey(ctx->tfm,
>> __bpf_dynptr_data_ptr(pkey),
>> +                       __bpf_dynptr_size(pkey));
>> +    if (*err)
>> +        goto err;
>> +
>> +    refcount_set(&ctx->usage, 1);
>> +
>> +    return ctx;
>> +err:
>> +    if (ctx->tfm)
>> +        crypto_free_sync_skcipher(ctx->tfm);
>> +    kfree(ctx);
>> +
>> +    return NULL;
>> +}
>
> [ ... ]
>
>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>> +                     const struct bpf_dynptr_kern *src,
>> +                     struct bpf_dynptr_kern *dst,
>> +                     const struct bpf_dynptr_kern *iv,
>> +                     bool decrypt)
>> +{
>> +    struct skcipher_request *req = NULL;
>> +    struct scatterlist sgin, sgout;
>> +    int err;
>> +
>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>> +        return -EINVAL;
>> +
>> +    if (__bpf_dynptr_is_rdonly(dst))
>> +        return -EINVAL;
>> +
>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>> +        return -EINVAL;
>> +
>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>> +        return -EINVAL;
>> +
>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>
> Doing alloc per packet may kill performance. Is it possible to optimize
> it somehow? What is the usual size of the req (e.g. the example in the
> selftest)?
>

In ktls code aead_request is allocated every time encryption is invoked,
see tls_decrypt_sg(), apparently per skb. Doesn't look like performance
killer. For selftest it's only sizeof(struct skcipher_request).

>> +    if (!req)
>> +        return -ENOMEM;
>> +
>> +    sg_init_one(&sgin, __bpf_dynptr_data_ptr(src),
>> __bpf_dynptr_size(src));
>> +    sg_init_one(&sgout, __bpf_dynptr_data_ptr(dst),
>> __bpf_dynptr_size(dst));
>> +
>> +    skcipher_request_set_crypt(req, &sgin, &sgout,
>> __bpf_dynptr_size(src),
>> +                   __bpf_dynptr_data_ptr(iv));
>> +
>> +    err = decrypt ? crypto_skcipher_decrypt(req) :
>> crypto_skcipher_encrypt(req);
>
> I am hitting this with the selftest in patch 2:
>
> [   39.806675] =============================
> [   39.807243] WARNING: suspicious RCU usage
> [   39.807825] 6.6.0-rc7-02091-g17e023652cc1 #336 Tainted: G           O
> [   39.808798] -----------------------------
> [   39.809368] kernel/sched/core.c:10149 Illegal context switch in
> RCU-bh read-side critical section!
> [   39.810589]
> [   39.810589] other info that might help us debug this:
> [   39.810589]
> [   39.811696]
> [   39.811696] rcu_scheduler_active = 2, debug_locks = 1
> [   39.812616] 2 locks held by test_progs/1769:
> [   39.813249]  #0: ffffffff84dce140 (rcu_read_lock){....}-{1:3}, at:
> ip6_finish_output2+0x625/0x1b70
> [   39.814539]  #1: ffffffff84dce1a0 (rcu_read_lock_bh){....}-{1:3}, at:
> __dev_queue_xmit+0x1df/0x2c40
> [   39.815813]
> [   39.815813] stack backtrace:
> [   39.816441] CPU: 1 PID: 1769 Comm: test_progs Tainted: G           O
> 6.6.0-rc7-02091-g17e023652cc1 #336
> [   39.817774] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
> BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
> [   39.819312] Call Trace:
> [   39.819655]  <TASK>
> [   39.819967]  dump_stack_lvl+0x130/0x1d0
> [   39.822669]  dump_stack+0x14/0x20
> [   39.823136]  lockdep_rcu_suspicious+0x220/0x350
> [   39.823777]  __might_resched+0xe0/0x660
> [   39.827915]  __might_sleep+0x89/0xf0
> [   39.828423]  skcipher_walk_virt+0x55/0x120
> [   39.828990]  crypto_ecb_decrypt+0x159/0x310
> [   39.833846]  crypto_skcipher_decrypt+0xa0/0xd0
> [   39.834481]  bpf_crypto_skcipher_crypt+0x29a/0x3c0
> [   39.837100]  bpf_crypto_skcipher_decrypt+0x56/0x70
> [   39.837770]  bpf_prog_fa576505ab32d186_decrypt_sanity+0x180/0x185
> [   39.838627]  cls_bpf_classify+0x3b6/0xf80
> [   39.839807]  tcf_classify+0x2f4/0x550
>

That's odd. skcipher_walk_virt() checks for SKCIPHER_WALK_SLEEP which
depends on CRYPTO_TFM_REQ_MAY_SLEEP of tfm, which shouldn't be set for
crypto_alloc_sync_skcipher(). I think we need some crypto@ folks to jump
in and explain.

David, Herbert could you please take a look at the patchset to confirm
that crypto API is used properly.

>> +
>> +    skcipher_request_free(req);
>> +
>> +    return err;
>> +}
>> +
>
>

2023-11-01 23:51:29

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>> +{
>>> +    enum bpf_dynptr_type type;
>>> +
>>> +    if (!ptr->data)
>>> +        return NULL;
>>> +
>>> +    type = bpf_dynptr_get_type(ptr);
>>> +
>>> +    switch (type) {
>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>> +        return ptr->data + ptr->offset;
>>> +    case BPF_DYNPTR_TYPE_SKB:
>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
>>> __bpf_dynptr_size(ptr));
>>> +    case BPF_DYNPTR_TYPE_XDP:
>>> +    {
>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>>> __bpf_dynptr_size(ptr));
>>
>> I suspect what it is doing here (for skb and xdp in particular) is very
>> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL,
>> sz) will work.
>>
>
> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
> bpf_kfunc. Should I refactor the code to use it in both places? Like

Sorry, scrolled too fast in my earlier reply :(

I am not aware of this limitation. What error does it have?
The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice() kfunc.

> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?






2023-11-02 00:31:26

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 01.11.2023 22:59, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static int bpf_crypto_skcipher_crypt(struct crypto_sync_skcipher *tfm,
>>>> +                     const struct bpf_dynptr_kern *src,
>>>> +                     struct bpf_dynptr_kern *dst,
>>>> +                     const struct bpf_dynptr_kern *iv,
>>>> +                     bool decrypt)
>>>> +{
>>>> +    struct skcipher_request *req = NULL;
>>>> +    struct scatterlist sgin, sgout;
>>>> +    int err;
>>>> +
>>>> +    if (crypto_sync_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (__bpf_dynptr_is_rdonly(dst))
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (!__bpf_dynptr_size(dst) || !__bpf_dynptr_size(src))
>>>> +        return -EINVAL;
>>>> +
>>>> +    if (__bpf_dynptr_size(iv) != crypto_sync_skcipher_ivsize(tfm))
>>>> +        return -EINVAL;
>>>> +
>>>> +    req = skcipher_request_alloc(&tfm->base, GFP_ATOMIC);
>>>
>>> Doing alloc per packet may kill performance. Is it possible to optimize it
>>> somehow? What is the usual size of the req (e.g. the example in the selftest)?
>>>
>>
>> In ktls code aead_request is allocated every time encryption is invoked, see
>> tls_decrypt_sg(), apparently per skb. Doesn't look like performance
>> killer. For selftest it's only sizeof(struct skcipher_request).
>
> ktls is doing the en/decrypt on the userspace behalf to compensate the cost.
>
> When this kfunc is used in xdp to decrypt a few bytes for each packet and then
> XDP_TX out, this extra alloc will be quite noticeable. If the size is usually
> small, can it be done in the stack memory?

Hmm... looks like SYNC_SKCIPHER_REQUEST_ON_STACK will help us. Ok, I'll change
this part to use stack allocations.

2023-11-02 00:38:16

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 01.11.2023 23:41, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>> +{
>>>> +    enum bpf_dynptr_type type;
>>>> +
>>>> +    if (!ptr->data)
>>>> +        return NULL;
>>>> +
>>>> +    type = bpf_dynptr_get_type(ptr);
>>>> +
>>>> +    switch (type) {
>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>>> +        return ptr->data + ptr->offset;
>>>> +    case BPF_DYNPTR_TYPE_SKB:
>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
>>>> __bpf_dynptr_size(ptr));
>>>> +    case BPF_DYNPTR_TYPE_XDP:
>>>> +    {
>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>>>> __bpf_dynptr_size(ptr));
>>>
>>> I suspect what it is doing here (for skb and xdp in particular) is very
>>> similar to bpf_dynptr_slice. Please check if bpf_dynptr_slice(ptr, 0, NULL,
>>> sz) will work.
>>>
>>
>> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
>> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
>> bpf_kfunc. Should I refactor the code to use it in both places? Like
>
> Sorry, scrolled too fast in my earlier reply :(
>
> I am not aware of this limitation. What error does it have?
> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice() kfunc.
>

Yeah, but they are in the same module. We do not declare prototypes of kfuncs in
linux/bpf.h and that's why we cannot use them outside of helpers.c.
The same problem was with bpf_dynptr_is_rdonly() I believe.

>> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?
>
>
>
>
>
>

2023-11-02 00:55:03

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 2/2] selftests: bpf: crypto skcipher algo selftests

On 01.11.2023 22:53, Martin KaFai Lau wrote:
> On 10/31/23 6:49 AM, Vadim Fedorenko wrote:
>> Add simple tc hook selftests to show the way to work with new crypto
>> BPF API. Some weird structre and map are added to setup program to make
>> verifier happy about dynptr initialization from memory. Simple AES-ECB
>> algo is used to demonstrate encryption and decryption of fixed size
>> buffers.
>>
>> Signed-off-by: Vadim Fedorenko <[email protected]>
>> ---
>> v2 -> v3:
>> - disable tests on s390 and aarch64 because of unknown Fatal exception
>>    in sg_init_one
>> v1 -> v2:
>> - add CONFIG_CRYPTO_AES and CONFIG_CRYPTO_ECB to selftest build config
>>    suggested by Daniel
>>
>>   kernel/bpf/crypto.c                           |   5 +-
>>   tools/testing/selftests/bpf/DENYLIST.aarch64  |   1 +
>>   tools/testing/selftests/bpf/DENYLIST.s390x    |   1 +
>>   tools/testing/selftests/bpf/config            |   3 +
>>   .../selftests/bpf/prog_tests/crypto_sanity.c  | 129 +++++++++++++++
>>   .../selftests/bpf/progs/crypto_common.h       | 103 ++++++++++++
>>   .../selftests/bpf/progs/crypto_sanity.c       | 154 ++++++++++++++++++
>>   7 files changed, 394 insertions(+), 2 deletions(-)
>>   create mode 100644 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
>>   create mode 100644 tools/testing/selftests/bpf/progs/crypto_common.h
>>   create mode 100644 tools/testing/selftests/bpf/progs/crypto_sanity.c
>>
>> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
>> index c2a0703934fc..4ee6193165ca 100644
>> --- a/kernel/bpf/crypto.c
>> +++ b/kernel/bpf/crypto.c
>> @@ -65,8 +65,9 @@ static void *__bpf_dynptr_data_ptr(const struct
>> bpf_dynptr_kern *ptr)
>>    *
>>    * bpf_crypto_skcipher_ctx_create() allocates memory using the BPF memory
>>    * allocator, and will not block. It may return NULL if no memory is available.
>> - * @algo: bpf_dynptr which holds string representation of algorithm.
>> - * @key:  bpf_dynptr which holds cipher key to do crypto.
>> + * @palgo: bpf_dynptr which holds string representation of algorithm.
>> + * @pkey:  bpf_dynptr which holds cipher key to do crypto.
>> + * @err:   integer to store error code when NULL is returned
>>    */
>>   __bpf_kfunc struct bpf_crypto_skcipher_ctx *
>>   bpf_crypto_skcipher_ctx_create(const struct bpf_dynptr_kern *palgo,
>> diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64
>> b/tools/testing/selftests/bpf/DENYLIST.aarch64
>> index 5c2cc7e8c5d0..72c7ef3e5872 100644
>> --- a/tools/testing/selftests/bpf/DENYLIST.aarch64
>> +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
>> @@ -1,5 +1,6 @@
>>   bpf_cookie/multi_kprobe_attach_api               #
>> kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
>>   bpf_cookie/multi_kprobe_link_api                 #
>> kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
>> +crypto_sanity                     # sg_init_one has exception on aarch64
>
> What is the exception? Is arm64 just lacking the support?

I have no idea, TBH. Might be because of different alignment requirements, but
it's just a guess.

>
>>   exceptions                     # JIT does not support calling kfunc
>> bpf_throw: -524
>>   fexit_sleep                                      # The test never returns.
>> The remaining tests cannot start.
>>   kprobe_multi_bench_attach                        # needs CONFIG_FPROBE
>> diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x
>> b/tools/testing/selftests/bpf/DENYLIST.s390x
>> index 1a63996c0304..8ab7485bedb1 100644
>> --- a/tools/testing/selftests/bpf/DENYLIST.s390x
>> +++ b/tools/testing/selftests/bpf/DENYLIST.s390x
>> @@ -1,5 +1,6 @@
>>   # TEMPORARY
>>   # Alphabetical order
>> +crypto_sanity                 # sg_init_one has exception on
>> s390                           (exceptions)
>>   exceptions                 # JIT does not support calling kfunc
>> bpf_throw                       (exceptions)
>>   get_stack_raw_tp                         # user_stack corrupted user
>> stack                                             (no backchain userspace)
>>   stacktrace_build_id                      # compare_map_keys stackid_hmap vs.
>> stackmap err -2 errno 2                   (?)
>> diff --git a/tools/testing/selftests/bpf/config
>> b/tools/testing/selftests/bpf/config
>> index 02dd4409200e..48b570fd1752 100644
>> --- a/tools/testing/selftests/bpf/config
>> +++ b/tools/testing/selftests/bpf/config
>> @@ -14,6 +14,9 @@ CONFIG_CGROUP_BPF=y
>>   CONFIG_CRYPTO_HMAC=y
>>   CONFIG_CRYPTO_SHA256=y
>>   CONFIG_CRYPTO_USER_API_HASH=y
>> +CONFIG_CRYPTO_SKCIPHER=y
>> +CONFIG_CRYPTO_ECB=y
>> +CONFIG_CRYPTO_AES=y
>>   CONFIG_DEBUG_INFO=y
>>   CONFIG_DEBUG_INFO_BTF=y
>>   CONFIG_DEBUG_INFO_DWARF4=y
>> diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
>> b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
>> new file mode 100644
>> index 000000000000..a43969da6d15
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
>> @@ -0,0 +1,129 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
>
> s/2022/2023/ :)

ah, sure

>
>> +
>> +#include <sys/types.h>
>> +#include <sys/socket.h>
>> +#include <net/if.h>
>> +#include <linux/in6.h>
>> +
>> +#include "test_progs.h"
>> +#include "network_helpers.h"
>> +#include "crypto_sanity.skel.h"
>> +
>> +#define NS_TEST "crypto_sanity_ns"
>> +#define IPV6_IFACE_ADDR "face::1"
>> +#define UDP_TEST_PORT 7777
>> +static const char plain_text[] = "stringtoencrypt0";
>> +static const char crypted_data[] =
>> "\x5B\x59\x39\xEA\xD9\x7A\x2D\xAD\xA7\xE0\x43" \
>> +                   "\x37\x8A\x77\x17\xB2";
>> +
>> +void test_crypto_sanity(void)
>> +{
>> +    LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
>> +    LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
>> +    LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
>> +    LIBBPF_OPTS(bpf_test_run_opts, opts,
>> +            .data_in = crypted_data,
>> +            .data_size_in = sizeof(crypted_data),
>
> This should not be needed now because the test_run is on a tracing program.

yep, will remove it

>
>> +            .repeat = 1,
>> +    );
>> +    struct nstoken *nstoken = NULL;
>> +    struct crypto_sanity *skel;
>> +    struct sockaddr_in6 addr;
>> +    int sockfd, err, pfd;
>> +    socklen_t addrlen;
>> +
>> +    skel = crypto_sanity__open();
>> +    if (!ASSERT_OK_PTR(skel, "skel open"))
>> +        return;
>> +
>> +    bpf_program__set_autoload(skel->progs.skb_crypto_setup, true);
>
> Remove the '?' from SEC("?fentry.s/bpf_fentry_test1") should save this
> set_autoload call

ok.
>
>> +
>> +    SYS(fail, "ip netns add %s", NS_TEST);
>> +    SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST,
>> IPV6_IFACE_ADDR);
>> +    SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
>> +
>> +    err = crypto_sanity__load(skel);
>> +    if (!ASSERT_OK(err, "crypto_sanity__load"))
>> +        goto fail;
>> +
>> +    nstoken = open_netns(NS_TEST);
>> +    if (!ASSERT_OK_PTR(nstoken, "open_netns"))
>> +        goto fail;
>> +
>> +    qdisc_hook.ifindex = if_nametoindex("lo");
>> +    if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
>> +        goto fail;
>> +
>> +    err = crypto_sanity__attach(skel);
>> +    if (!ASSERT_OK(err, "crypto_sanity__attach"))
>> +        goto fail;
>> +
>> +    pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
>> +    if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
>> +        goto fail;
>> +
>> +    err = bpf_prog_test_run_opts(pfd, &opts);
>> +    if (!ASSERT_OK(err, "skb_crypto_setup") ||
>> +        !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
>> +        goto fail;
>> +
>> +    if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
>> +        goto fail;
>> +
>> +    err = bpf_tc_hook_create(&qdisc_hook);
>> +    if (!ASSERT_OK(err, "create qdisc hook"))
>> +        goto fail;
>> +
>> +    addrlen = sizeof(addr);
>> +    err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
>> +                (void *)&addr, &addrlen);
>> +    if (!ASSERT_OK(err, "make_sockaddr"))
>> +        goto fail;
>> +
>> +    tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
>> +    err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
>> +    if (!ASSERT_OK(err, "attach decrypt filter"))
>> +        goto fail;
>> +
>> +    sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
>> +    if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
>> +        goto fail;
>> +    err = sendto(sockfd, crypted_data, 16, 0, (void *)&addr, addrlen);
>> +    close(sockfd);
>> +    if (!ASSERT_EQ(err, 16, "decrypt send"))
>> +        goto fail;
>> +
>> +    bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
>> +    if (!ASSERT_OK(skel->bss->status, "decrypt status"))
>> +        goto fail;
>> +    if (!ASSERT_STRNEQ(skel->bss->dst, plain_text, sizeof(plain_text),
>> "decrypt"))
>> +        goto fail;
>> +
>> +    tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
>> +    err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
>> +    if (!ASSERT_OK(err, "attach encrypt filter"))
>> +        goto fail;
>> +
>> +    sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
>> +    if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
>> +        goto fail;
>> +    err = sendto(sockfd, plain_text, 16, 0, (void *)&addr, addrlen);
>> +    close(sockfd);
>> +    if (!ASSERT_EQ(err, 16, "encrypt send"))
>> +        goto fail;
>> +
>> +    bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
>> +    if (!ASSERT_OK(skel->bss->status, "encrypt status"))
>> +        goto fail;
>> +    if (!ASSERT_STRNEQ(skel->bss->dst, crypted_data, sizeof(crypted_data),
>> "encrypt"))
>> +        goto fail;
>> +
>> +fail:
>> +    if (nstoken) {
>> +        bpf_tc_hook_destroy(&qdisc_hook);
>> +        close_netns(nstoken);
>> +    }
>> +    SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
>> +    crypto_sanity__destroy(skel);
>> +}
>> diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h
>> b/tools/testing/selftests/bpf/progs/crypto_common.h
>> new file mode 100644
>> index 000000000000..584378bb6df8
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/progs/crypto_common.h
>> @@ -0,0 +1,103 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
>> +
>> +#ifndef _CRYPTO_COMMON_H
>> +#define _CRYPTO_COMMON_H
>> +
>> +#include "errno.h"
>> +#include <stdbool.h>
>> +
>> +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
>> +private(CTX) static struct bpf_crypto_skcipher_ctx __kptr * global_crypto_ctx;
>
> Add a test to show how it is used?

I'm wondering whether we really need this. Looks like we don't.

>
>> +
>> +struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_create(const struct
>> bpf_dynptr *algo,
>> +                                   const struct bpf_dynptr *key,
>> +                                   int *err) __ksym;
>> +struct bpf_crypto_skcipher_ctx *bpf_crypto_skcipher_ctx_acquire(struct
>> bpf_crypto_skcipher_ctx *ctx) __ksym;
>> +void bpf_crypto_skcipher_ctx_release(struct bpf_crypto_skcipher_ctx *ctx)
>> __ksym;
>
> Same for acquire and release kfunc. Add test cases.
>
> btw, does it have use cases to store the same bpf_crypto_skcipher_ctx in
> multiple maps?

Yeah, sure. You can potentially use the same algo with the same configured key
in parallel. All sensitive data is stored in the request structure.

>
>> +int bpf_crypto_skcipher_encrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> +                const struct bpf_dynptr *src, struct bpf_dynptr *dst,
>> +                const struct bpf_dynptr *iv) __ksym;
>> +int bpf_crypto_skcipher_decrypt(struct bpf_crypto_skcipher_ctx *ctx,
>> +                const struct bpf_dynptr *src, struct bpf_dynptr *dst,
>> +                const struct bpf_dynptr *iv) __ksym;
>> +
>> +struct __crypto_skcipher_ctx_value {
>> +    struct bpf_crypto_skcipher_ctx __kptr * ctx;
>> +};
>> +
>> +struct crypto_conf_value {
>> +    __u8 algo[32];
>> +    __u32 algo_size;
>> +    __u8 key[32];
>> +    __u32 key_size;
>> +    __u8 iv[32];
>> +    __u32 iv_size;
>> +    __u8 dst[32];
>> +    __u32 dst_size;
>> +};
>> +
>> +struct array_conf_map {
>> +    __uint(type, BPF_MAP_TYPE_ARRAY);
>> +    __type(key, int);
>> +    __type(value, struct crypto_conf_value);
>> +    __uint(max_entries, 1);
>> +} __crypto_conf_map SEC(".maps");
>> +
>> +struct array_map {
>> +    __uint(type, BPF_MAP_TYPE_ARRAY);
>> +    __type(key, int);
>> +    __type(value, struct __crypto_skcipher_ctx_value);
>> +    __uint(max_entries, 1);
>> +} __crypto_skcipher_ctx_map SEC(".maps");
>> +
>> +static inline struct crypto_conf_value *crypto_conf_lookup(void)
>> +{
>> +    struct crypto_conf_value *v, local = {};
>> +    u32 key = 0;
>> +
>> +    v = bpf_map_lookup_elem(&__crypto_conf_map, &key);
>> +    if (v)
>> +        return v;
>> +
>> +    bpf_map_update_elem(&__crypto_conf_map, &key, &local, 0);
>> +    return bpf_map_lookup_elem(&__crypto_conf_map, &key);
>
> hmm... local is all 0 which became the map value. Where is it initialized?

Ahh... it's actually initialized in ctx_insert(), but it looks like this part is
left over from a previous iteration. I'll adjust the code.

>
>> +}
>> +
>> +static inline struct __crypto_skcipher_ctx_value
>> *crypto_skcipher_ctx_value_lookup(void)
>> +{
>> +    u32 key = 0;
>> +
>> +    return bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
>> +}
>> +
>> +static inline int crypto_skcipher_ctx_insert(struct bpf_crypto_skcipher_ctx
>> *ctx)
>> +{
>> +    struct __crypto_skcipher_ctx_value local, *v;
>> +    long status;
>
> nit. s/status/err/

sure

>
>> +    struct bpf_crypto_skcipher_ctx *old;
>> +    u32 key = 0;
>> +
>> +    local.ctx = NULL;
>> +    status = bpf_map_update_elem(&__crypto_skcipher_ctx_map, &key, &local, 0);
>> +    if (status) {
>> +        bpf_crypto_skcipher_ctx_release(ctx);
>> +        return status;
>> +    }
>> +
>> +    v = bpf_map_lookup_elem(&__crypto_skcipher_ctx_map, &key);
>> +    if (!v) {
>> +        bpf_crypto_skcipher_ctx_release(ctx);
>> +        return -ENOENT;
>> +    }
>> +
>> +    old = bpf_kptr_xchg(&v->ctx, ctx);
>> +    if (old) {
>> +        bpf_crypto_skcipher_ctx_release(old);
>> +        return -EEXIST;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +#endif /* _CRYPTO_COMMON_H */
>> diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c
>> b/tools/testing/selftests/bpf/progs/crypto_sanity.c
>> new file mode 100644
>> index 000000000000..71a172d8d2a2
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
>> @@ -0,0 +1,154 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
>> +
>> +#include "vmlinux.h"
>> +#include "bpf_tracing_net.h"
>> +#include <bpf/bpf_helpers.h>
>> +#include <bpf/bpf_endian.h>
>> +#include <bpf/bpf_tracing.h>
>> +#include "bpf_misc.h"
>> +#include "bpf_kfuncs.h"
>> +#include "crypto_common.h"
>> +
>> +#define UDP_TEST_PORT 7777
>> +unsigned char crypto_key[16] = "testtest12345678";
>> +char crypto_algo[9] = "ecb(aes)";
>> +char dst[32] = {};
>> +int status;
>> +
>> +static inline int skb_validate_test(const struct __sk_buff *skb)
>> +{
>> +    struct ipv6hdr ip6h;
>> +    struct udphdr udph;
>> +    u32 offset;
>> +
>> +    if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
>> +        return -1;
>> +
>> +    if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
>> +        return -1;
>> +
>> +    if (ip6h.nexthdr != IPPROTO_UDP)
>> +        return -1;
>> +
>> +    if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
>> +        return -1;
>> +
>> +    if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
>> +        return -1;
>> +
>> +    offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
>> +    if (skb->len < offset + 16)
>> +        return -1;
>> +
>> +    return offset;
>> +}
>> +
>> +SEC("?fentry.s/bpf_fentry_test1")
>
> I wonder if others have better idea how to do this. This is basically setting up
> the algo and key which requires to call a kfunc in sleepable context. Otherwise,
> the tc's test_run would be a better fit.

The other way would be to implement a crypto API function that allocates with
GFP_ATOMIC. Currently all skcipher allocations are hard-coded to GFP_KERNEL.

>
>> +int BPF_PROG(skb_crypto_setup)
>> +{
>> +    struct crypto_conf_value *c;
>> +    struct bpf_dynptr algo, key;
>> +    int err = 0;
>> +
>> +    status = 0;
>> +
>> +    c = crypto_conf_lookup();
>
> "c" is not used. Why lookup is needed? May be properly setup the
> __crypto_conf_map so the example is more complete.
>

This is the most exciting part. Without this lookup the verifier complains about
the initialization of a dynptr from a memory buffer. I don't really understand
how it works or how it changes pointer types.

>> +    if (!c) {
>> +        status = -EINVAL;
>> +        return 0;
>> +    }
>> +
>> +    bpf_dynptr_from_mem(crypto_algo, sizeof(crypto_algo), 0, &algo);
>> +    bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
>> +    struct bpf_crypto_skcipher_ctx *cctx =
>> bpf_crypto_skcipher_ctx_create(&algo, &key, &err);
>> +
>> +    if (!cctx) {
>> +        status = err;
>> +        return 0;
>> +    }
>> +
>> +    err = crypto_skcipher_ctx_insert(cctx);
>> +    if (err && err != -EEXIST)
>> +        status = err;
>> +
>> +    return 0;
>> +}
>> +
>> +SEC("tc")
>> +int decrypt_sanity(struct __sk_buff *skb)
>> +{
>> +    struct __crypto_skcipher_ctx_value *v;
>> +    struct bpf_crypto_skcipher_ctx *ctx;
>> +    struct bpf_dynptr psrc, pdst, iv;
>> +    int err;
>
> nit. s/err/offset/. "err" is actually the offset for the common case.
>
> btw, considering dynptr psrc has to be created from skb anyway, is it easier to
> use bpf_dynptr_slice(psrc, 0, NULL, ETH_HLEN + sizeof(ip6h) + sizeof(udph)) in
> the above skb_validate_test()?

Yeah, maybe it's better. The only problem is naming then...

>
>> +
>> +    err = skb_validate_test(skb);
>> +    if (err < 0) {
>> +        status = err;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    v = crypto_skcipher_ctx_value_lookup();
>> +    if (!v) {
>> +        status = -ENOENT;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    ctx = v->ctx;
>> +    if (!ctx) {
>> +        status = -ENOENT;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    bpf_dynptr_from_skb(skb, 0, &psrc);
>> +    bpf_dynptr_adjust(&psrc, err, err + 16);
>> +    bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
>> +    bpf_dynptr_from_mem(dst, 0, 0, &iv);
>> +
>> +    bpf_crypto_skcipher_decrypt(ctx, &psrc, &pdst, &iv);
>
> check decrypt return value before setting "status = 0;' below.

Agree, will adjust the code.

>> +
>> +    status = 0;
>> +
>> +    return TC_ACT_SHOT;
>> +}
>> +
>> +SEC("tc")
>> +int encrypt_sanity(struct __sk_buff *skb)
>> +{
>> +    struct __crypto_skcipher_ctx_value *v;
>> +    struct bpf_crypto_skcipher_ctx *ctx;
>> +    struct bpf_dynptr psrc, pdst, iv;
>> +    int err;
>> +
>> +    status = 0;
>> +
>> +    err = skb_validate_test(skb);
>> +    if (err < 0) {
>> +        status = err;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    v = crypto_skcipher_ctx_value_lookup();
>> +    if (!v) {
>> +        status = -ENOENT;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    ctx = v->ctx;
>> +    if (!ctx) {
>> +        status = -ENOENT;
>> +        return TC_ACT_SHOT;
>> +    }
>> +
>> +    bpf_dynptr_from_skb(skb, 0, &psrc);
>> +    bpf_dynptr_adjust(&psrc, err, err + 16);
>> +    bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
>> +    bpf_dynptr_from_mem(dst, 0, 0, &iv);
>> +
>> +    bpf_crypto_skcipher_encrypt(ctx, &psrc, &pdst, &iv);
>
> Same here. check return value.
>

Yep.

>> +
>> +    return TC_ACT_SHOT;
>> +}
>> +
>> +char __license[] SEC("license") = "GPL";
>

2023-11-02 13:44:45

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 01/11/2023 23:41, Martin KaFai Lau wrote:
> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>> +{
>>>> +    enum bpf_dynptr_type type;
>>>> +
>>>> +    if (!ptr->data)
>>>> +        return NULL;
>>>> +
>>>> +    type = bpf_dynptr_get_type(ptr);
>>>> +
>>>> +    switch (type) {
>>>> +    case BPF_DYNPTR_TYPE_LOCAL:
>>>> +    case BPF_DYNPTR_TYPE_RINGBUF:
>>>> +        return ptr->data + ptr->offset;
>>>> +    case BPF_DYNPTR_TYPE_SKB:
>>>> +        return skb_pointer_if_linear(ptr->data, ptr->offset,
>>>> __bpf_dynptr_size(ptr));
>>>> +    case BPF_DYNPTR_TYPE_XDP:
>>>> +    {
>>>> +        void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>>>> __bpf_dynptr_size(ptr));
>>>
>>> I suspect what it is doing here (for skb and xdp in particular) is
>>> very similar to bpf_dynptr_slice. Please check if
>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
>>>
>>
>> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
>> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
>> bpf_kfunc. Should I refactor the code to use it in both places? Like
>
> Sorry, scrolled too fast in my earlier reply :(
>
> I am not aware of this limitation. What error does it have?
> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
> kfunc.
>
>> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?

Apparently Song has a patch to expose these bpf_dynptr_slice* functions
to in-kernel users.

https://lore.kernel.org/bpf/[email protected]/

Should I wait for it to be merged before sending next version?

2023-11-02 15:36:33

by Alexei Starovoitov

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
<[email protected]> wrote:
>
> On 01/11/2023 23:41, Martin KaFai Lau wrote:
> > On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
> >>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> >>>> +{
> >>>> + enum bpf_dynptr_type type;
> >>>> +
> >>>> + if (!ptr->data)
> >>>> + return NULL;
> >>>> +
> >>>> + type = bpf_dynptr_get_type(ptr);
> >>>> +
> >>>> + switch (type) {
> >>>> + case BPF_DYNPTR_TYPE_LOCAL:
> >>>> + case BPF_DYNPTR_TYPE_RINGBUF:
> >>>> + return ptr->data + ptr->offset;
> >>>> + case BPF_DYNPTR_TYPE_SKB:
> >>>> + return skb_pointer_if_linear(ptr->data, ptr->offset,
> >>>> __bpf_dynptr_size(ptr));
> >>>> + case BPF_DYNPTR_TYPE_XDP:
> >>>> + {
> >>>> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
> >>>> __bpf_dynptr_size(ptr));
> >>>
> >>> I suspect what it is doing here (for skb and xdp in particular) is
> >>> very similar to bpf_dynptr_slice. Please check if
> >>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
> >>>
> >>
> >> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
> >> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
> >> bpf_kfunc. Should I refactor the code to use it in both places? Like
> >
> > Sorry, scrolled too fast in my earlier reply :(
> >
> > I am not aware of this limitation. What error does it have?
> > The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
> > kfunc.
> >
> >> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?
>
> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
> ton in-kernel users.
>
> https://lore.kernel.org/bpf/[email protected]/
>
> Should I wait for it to be merged before sending next version?

If you need something from another developer it's best to ask them
explicitly :)
In this case Song can respin with just that change that you need.

2023-11-02 16:15:01

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On 02/11/2023 15:36, Alexei Starovoitov wrote:
> On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
> <[email protected]> wrote:
>>
>> On 01/11/2023 23:41, Martin KaFai Lau wrote:
>>> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
>>>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
>>>>>> +{
>>>>>> + enum bpf_dynptr_type type;
>>>>>> +
>>>>>> + if (!ptr->data)
>>>>>> + return NULL;
>>>>>> +
>>>>>> + type = bpf_dynptr_get_type(ptr);
>>>>>> +
>>>>>> + switch (type) {
>>>>>> + case BPF_DYNPTR_TYPE_LOCAL:
>>>>>> + case BPF_DYNPTR_TYPE_RINGBUF:
>>>>>> + return ptr->data + ptr->offset;
>>>>>> + case BPF_DYNPTR_TYPE_SKB:
>>>>>> + return skb_pointer_if_linear(ptr->data, ptr->offset,
>>>>>> __bpf_dynptr_size(ptr));
>>>>>> + case BPF_DYNPTR_TYPE_XDP:
>>>>>> + {
>>>>>> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
>>>>>> __bpf_dynptr_size(ptr));
>>>>>
>>>>> I suspect what it is doing here (for skb and xdp in particular) is
>>>>> very similar to bpf_dynptr_slice. Please check if
>>>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
>>>>>
>>>>
>>>> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
>>>> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
>>>> bpf_kfunc. Should I refactor the code to use it in both places? Like
>>>
>>> Sorry, scrolled too fast in my earlier reply :(
>>>
>>> I am not aware of this limitation. What error does it have?
>>> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
>>> kfunc.
>>>
>>>> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?
>>
>> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
>> ton in-kernel users.
>>
>> https://lore.kernel.org/bpf/[email protected]/
>>
>> Should I wait for it to be merged before sending next version?
>
> If you need something from another developer it's best to ask them
> explicitly :)
> In this case Song can respin with just that change that you need.

Got it. I actually need 2 different changes from the same patchset, I'll
ping Song in the appropriate thread, thanks!

2023-11-02 17:48:15

by Andrii Nakryiko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v3 1/2] bpf: add skcipher API support to TC/XDP programs

On Thu, Nov 2, 2023 at 9:14 AM Vadim Fedorenko
<[email protected]> wrote:
>
> On 02/11/2023 15:36, Alexei Starovoitov wrote:
> > On Thu, Nov 2, 2023 at 6:44 AM Vadim Fedorenko
> > <[email protected]> wrote:
> >>
> >> On 01/11/2023 23:41, Martin KaFai Lau wrote:
> >>> On 11/1/23 3:50 PM, Vadim Fedorenko wrote:
> >>>>>> +static void *__bpf_dynptr_data_ptr(const struct bpf_dynptr_kern *ptr)
> >>>>>> +{
> >>>>>> + enum bpf_dynptr_type type;
> >>>>>> +
> >>>>>> + if (!ptr->data)
> >>>>>> + return NULL;
> >>>>>> +
> >>>>>> + type = bpf_dynptr_get_type(ptr);
> >>>>>> +
> >>>>>> + switch (type) {
> >>>>>> + case BPF_DYNPTR_TYPE_LOCAL:
> >>>>>> + case BPF_DYNPTR_TYPE_RINGBUF:
> >>>>>> + return ptr->data + ptr->offset;
> >>>>>> + case BPF_DYNPTR_TYPE_SKB:
> >>>>>> + return skb_pointer_if_linear(ptr->data, ptr->offset,
> >>>>>> __bpf_dynptr_size(ptr));
> >>>>>> + case BPF_DYNPTR_TYPE_XDP:
> >>>>>> + {
> >>>>>> + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset,
> >>>>>> __bpf_dynptr_size(ptr));
> >>>>>
> >>>>> I suspect what it is doing here (for skb and xdp in particular) is
> >>>>> very similar to bpf_dynptr_slice. Please check if
> >>>>> bpf_dynptr_slice(ptr, 0, NULL, sz) will work.
> >>>>>
> >>>>
> >>>> Well, yes, it's simplified version of bpf_dynptr_slice. The problem is
> >>>> that bpf_dynptr_slice bpf_kfunc which cannot be used in another
> >>>> bpf_kfunc. Should I refactor the code to use it in both places? Like
> >>>
> >>> Sorry, scrolled too fast in my earlier reply :(
> >>>
> >>> I am not aware of this limitation. What error does it have?
> >>> The bpf_dynptr_slice_rdwr kfunc() is also calling the bpf_dynptr_slice()
> >>> kfunc.
> >>>
> >>>> create __bpf_dynptr_slice() which will be internal part of bpf_kfunc?
> >>
> >> Apparently Song has a patch to expose these bpf_dynptr_slice* functions
> >> ton in-kernel users.
> >>
> >> https://lore.kernel.org/bpf/[email protected]/
> >>
> >> Should I wait for it to be merged before sending next version?
> >
> > If you need something from another developer it's best to ask them
> > explicitly :)
> > In this case Song can respin with just that change that you need.
>
> Got it. I actually need 2 different changes from the same patchset, I'll
> ping Song in the appropriate thread, thanks!
>

Please also check my ramblings in [0]

[0] https://patchwork.kernel.org/project/netdevbpf/patch/[email protected]/