2023-12-02 02:35:58

by Vadim Fedorenko

Subject: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

Add crypto API support to BPF to be able to decrypt or encrypt packets
in TC/XDP BPF programs. Special care should be taken for the initialization
part of the crypto algo because crypto allocation doesn't work with preemption
disabled; it can only be run in a sleepable BPF program. Also async crypto
is not supported because of the very same issue - TC/XDP BPF programs
are not sleepable.
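
A rough sketch of the intended usage split (simplified, error handling
omitted; these are the kfuncs added below):

        /* sleepable BPF program: allocate and configure the context once */
        ctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key_dynptr, 0, &err);

        /* TC/XDP program: use the context stored in a map kptr */
        bpf_crypto_encrypt(ctx, &src_dynptr, &dst_dynptr, &iv_dynptr);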

Signed-off-by: Vadim Fedorenko <[email protected]>
---
v6 -> v7:
- style fixes
v5 -> v6:
- replace lskcipher with infrastructure to provide pluggable cipher
types
- add BPF skcipher as plug-in module in a separate patch
v4 -> v5:
- replace crypto API to use lskcipher (suggested by Herbert Xu)
- remove SG list usage and provide raw buffers
v3 -> v4:
- reuse __bpf_dynptr_data and remove own implementation
- use const __str to provide algorithm name
- use kfunc macros to avoid compiler warnings
v2 -> v3:
- fix kdoc issues
v1 -> v2:
- use kmalloc in sleepable func, suggested by Alexei
- use __bpf_dynptr_is_rdonly() to check destination, suggested by Jakub
- use __bpf_dynptr_data_ptr() for all dynptr accesses
---
include/linux/bpf.h | 1 +
include/linux/bpf_crypto.h | 23 +++
kernel/bpf/Makefile | 3 +
kernel/bpf/crypto.c | 364 +++++++++++++++++++++++++++++++++++++
kernel/bpf/helpers.c | 2 +-
kernel/bpf/verifier.c | 1 +
6 files changed, 393 insertions(+), 1 deletion(-)
create mode 100644 include/linux/bpf_crypto.h
create mode 100644 kernel/bpf/crypto.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index eb447b0a9423..0143ff6c93a1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1228,6 +1228,7 @@ int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
new file mode 100644
index 000000000000..e81bd8ab979c
--- /dev/null
+++ b/include/linux/bpf_crypto.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_CRYPTO_H
+#define _BPF_CRYPTO_H
+
+struct bpf_crypto_type {
+ void *(*alloc_tfm)(const char *algo);
+ void (*free_tfm)(void *tfm);
+ int (*has_algo)(const char *algo);
+ int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
+ int (*setauthsize)(void *tfm, unsigned int authsize);
+ int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ unsigned int (*ivsize)(void *tfm);
+ u32 (*get_flags)(void *tfm);
+ struct module *owner;
+ char name[14];
+};
+
+int bpf_crypto_register_type(const struct bpf_crypto_type *type);
+int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
+
+#endif /* _BPF_CRYPTO_H */
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f526b7573e97..bcde762bb2c2 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -41,6 +41,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
+ifeq ($(CONFIG_CRYPTO),y)
+obj-$(CONFIG_BPF_SYSCALL) += crypto.o
+endif
obj-$(CONFIG_BPF_PRELOAD) += preload/

obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
new file mode 100644
index 000000000000..a1e543d1d7fe
--- /dev/null
+++ b/kernel/bpf/crypto.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Meta, Inc */
+#include <linux/bpf.h>
+#include <linux/bpf_crypto.h>
+#include <linux/bpf_mem_alloc.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <crypto/skcipher.h>
+
+struct bpf_crypto_type_list {
+ const struct bpf_crypto_type *type;
+ struct list_head list;
+};
+
+static LIST_HEAD(bpf_crypto_types);
+static DECLARE_RWSEM(bpf_crypto_types_sem);
+
+/**
+ * struct bpf_crypto_ctx - refcounted BPF crypto context structure
+ * @type: The pointer to bpf crypto type
+ * @tfm: The pointer to instance of crypto API struct.
+ * @rcu: The RCU head used to free the crypto context with RCU safety.
+ * @usage: Object reference counter. When the refcount goes to 0, the
+ * memory is released back to the BPF allocator, which provides
+ * RCU safety.
+ */
+struct bpf_crypto_ctx {
+ const struct bpf_crypto_type *type;
+ void *tfm;
+ struct rcu_head rcu;
+ refcount_t usage;
+};
+
+int bpf_crypto_register_type(const struct bpf_crypto_type *type)
+{
+ struct bpf_crypto_type_list *node;
+ int err = -EEXIST;
+
+ down_write(&bpf_crypto_types_sem);
+ list_for_each_entry(node, &bpf_crypto_types, list) {
+ if (!strcmp(node->type->name, type->name))
+ goto unlock;
+ }
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!node)
+ goto unlock;
+
+ node->type = type;
+ list_add(&node->list, &bpf_crypto_types);
+ err = 0;
+
+unlock:
+ up_write(&bpf_crypto_types_sem);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(bpf_crypto_register_type);
+
+int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
+{
+ struct bpf_crypto_type_list *node;
+ int err = -ENOENT;
+
+ down_write(&bpf_crypto_types_sem);
+ list_for_each_entry(node, &bpf_crypto_types, list) {
+ if (strcmp(node->type->name, type->name))
+ continue;
+
+ list_del(&node->list);
+ kfree(node);
+ err = 0;
+ break;
+ }
+ up_write(&bpf_crypto_types_sem);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
+
+static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
+{
+ const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
+ struct bpf_crypto_type_list *node;
+
+ down_read(&bpf_crypto_types_sem);
+ list_for_each_entry(node, &bpf_crypto_types, list) {
+ if (strcmp(node->type->name, name))
+ continue;
+
+ if (try_module_get(node->type->owner))
+ type = node->type;
+ break;
+ }
+ up_read(&bpf_crypto_types_sem);
+
+ return type;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
+ *
+ * Allocates a crypto context that can be used, acquired, and released by
+ * a BPF program. The crypto context returned by this function must either
+ * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
+ * As crypto API functions use GFP_KERNEL allocations, this function can
+ * only be used in sleepable BPF programs.
+ *
+ * bpf_crypto_ctx_create() allocates memory for crypto context.
+ * It may return NULL if no memory is available.
+ * @type__str: pointer to string representation of crypto type.
+ * @algo__str: pointer to string representation of algorithm.
+ * @pkey: bpf_dynptr which holds cipher key to do crypto.
+ * @err: integer to store error code when NULL is returned
+ */
+__bpf_kfunc struct bpf_crypto_ctx *
+bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
+ const struct bpf_dynptr_kern *pkey,
+ unsigned int authsize, int *err)
+{
+ const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
+ struct bpf_crypto_ctx *ctx;
+ const u8 *key;
+ u32 key_len;
+
+ type = bpf_crypto_get_type(type__str);
+ if (IS_ERR(type)) {
+ *err = PTR_ERR(type);
+ return NULL;
+ }
+
+ if (!type->has_algo(algo__str)) {
+ *err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!authsize && type->setauthsize) {
+ *err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (authsize && !type->setauthsize) {
+ *err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ key_len = __bpf_dynptr_size(pkey);
+ if (!key_len) {
+ *err = -EINVAL;
+ goto err;
+ }
+ key = __bpf_dynptr_data(pkey, key_len);
+ if (!key) {
+ *err = -EINVAL;
+ goto err;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ *err = -ENOMEM;
+ goto err;
+ }
+
+ ctx->type = type;
+ ctx->tfm = type->alloc_tfm(algo__str);
+ if (IS_ERR(ctx->tfm)) {
+ *err = PTR_ERR(ctx->tfm);
+ ctx->tfm = NULL;
+ goto err;
+ }
+
+ if (authsize) {
+ *err = type->setauthsize(ctx->tfm, authsize);
+ if (*err)
+ goto err;
+ }
+
+ *err = type->setkey(ctx->tfm, key, key_len);
+ if (*err)
+ goto err;
+
+ refcount_set(&ctx->usage, 1);
+
+ return ctx;
+err:
+ if (ctx->tfm)
+ type->free_tfm(ctx->tfm);
+ kfree(ctx);
+ module_put(type->owner);
+
+ return NULL;
+}
+
+static void crypto_free_cb(struct rcu_head *head)
+{
+ struct bpf_crypto_ctx *ctx;
+
+ ctx = container_of(head, struct bpf_crypto_ctx, rcu);
+ ctx->type->free_tfm(ctx->tfm);
+ module_put(ctx->type->owner);
+ kfree(ctx);
+}
+
+/**
+ * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
+ * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
+ * pointer.
+ *
+ * Acquires a reference to a BPF crypto context. The context returned by this function
+ * must either be embedded in a map as a kptr, or freed with
+ * bpf_crypto_skcipher_ctx_release().
+ */
+__bpf_kfunc struct bpf_crypto_ctx *
+bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
+{
+ refcount_inc(&ctx->usage);
+ return ctx;
+}
+
+/**
+ * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
+ * @ctx: The crypto context being released.
+ *
+ * Releases a previously acquired reference to a BPF crypto context. When the final
+ * reference of the BPF crypto context has been released, it is subsequently freed in
+ * an RCU callback in the BPF memory allocator.
+ */
+__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
+{
+ if (refcount_dec_and_test(&ctx->usage))
+ call_rcu(&ctx->rcu, crypto_free_cb);
+}
+
+static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ const struct bpf_dynptr_kern *iv,
+ bool decrypt)
+{
+ u32 src_len, dst_len, iv_len;
+ const u8 *psrc;
+ u8 *pdst, *piv;
+ int err;
+
+ if (ctx->type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY)
+ return -EINVAL;
+
+ if (__bpf_dynptr_is_rdonly(dst))
+ return -EINVAL;
+
+ iv_len = __bpf_dynptr_size(iv);
+ src_len = __bpf_dynptr_size(src);
+ dst_len = __bpf_dynptr_size(dst);
+ if (!src_len || !dst_len)
+ return -EINVAL;
+
+ if (iv_len != ctx->type->ivsize(ctx->tfm))
+ return -EINVAL;
+
+ psrc = __bpf_dynptr_data(src, src_len);
+ if (!psrc)
+ return -EINVAL;
+ pdst = __bpf_dynptr_data_rw(dst, dst_len);
+ if (!pdst)
+ return -EINVAL;
+
+ piv = iv_len ? __bpf_dynptr_data_rw(iv, iv_len) : NULL;
+ if (iv_len && !piv)
+ return -EINVAL;
+
+ err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
+ : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);
+
+ return err;
+}
+
+/**
+ * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
+ * @iv: bpf_dynptr to IV data to be used by decryptor.
+ *
+ * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ struct bpf_dynptr_kern *iv)
+{
+ return bpf_crypto_crypt(ctx, src, dst, iv, true);
+}
+
+/**
+ * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
+ * @ctx: The crypto context being used. The ctx must be a trusted pointer.
+ * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
+ * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
+ * @iv: bpf_dynptr to IV data to be used by decryptor.
+ *
+ * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
+ */
+__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
+ const struct bpf_dynptr_kern *src,
+ struct bpf_dynptr_kern *dst,
+ struct bpf_dynptr_kern *iv)
+{
+ return bpf_crypto_crypt(ctx, src, dst, iv, false);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_SET8_START(crypt_init_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+BTF_SET8_END(crypt_init_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_init_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &crypt_init_kfunc_btf_ids,
+};
+
+BTF_SET8_START(crypt_kfunc_btf_ids)
+BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU)
+BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU)
+BTF_SET8_END(crypt_kfunc_btf_ids)
+
+static const struct btf_kfunc_id_set crypt_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &crypt_kfunc_btf_ids,
+};
+
+BTF_ID_LIST(bpf_crypto_dtor_ids)
+BTF_ID(struct, bpf_crypto_ctx)
+BTF_ID(func, bpf_crypto_ctx_release)
+
+static int __init crypto_kfunc_init(void)
+{
+ int ret;
+ const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = {
+ {
+ .btf_id = bpf_crypto_dtor_ids[0],
+ .kfunc_btf_id = bpf_crypto_dtor_ids[1]
+ },
+ };
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
+ &crypt_init_kfunc_set);
+ return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors,
+ ARRAY_SIZE(bpf_crypto_dtors),
+ THIS_MODULE);
+}
+
+late_initcall(crypto_kfunc_init);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index b45a8381f9bd..b73314c0124e 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1436,7 +1436,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)

-static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
return ptr->size & DYNPTR_RDONLY_BIT;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8e7b6072e3f4..c54716966d5d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5162,6 +5162,7 @@ BTF_ID(struct, cgroup)
#endif
BTF_ID(struct, bpf_cpumask)
BTF_ID(struct, task_struct)
+BTF_ID(struct, bpf_crypto_ctx)
BTF_SET_END(rcu_protected_types)

static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
--
2.39.3



2023-12-02 02:36:06

by Vadim Fedorenko

Subject: [PATCH bpf-next v7 3/3] selftests: bpf: crypto skcipher algo selftests

Add simple tc hook selftests to show the way to work with new crypto
BPF API. Some weird structre and map are added to setup program to make
verifier happy about dynptr initialization from memory. Simple AES-ECB
algo is used to demonstrate encryption and decryption of fixed size
buffers.

Signed-off-by: Vadim Fedorenko <[email protected]>
---
v6 -> v7:
- style issues
v5 -> v6:
- use AF_ALG socket to confirm proper algorithm test
- adjust test kernel config to include AF_ALG
v4 -> v5:
- adjust selftests to use new naming
- restore tests on aarch64 and s390 as no sg lists are used
v3 -> v4:
- adjust selftests to use new syntax of helpers
- add tests for acquire and release
v2 -> v3:
- disable tests on s390 and aarch64 because of unknown Fatal exception
in sg_init_one
v1 -> v2:
- add CONFIG_CRYPTO_AES and CONFIG_CRYPTO_ECB to selftest build config
suggested by Daniel
---
tools/testing/selftests/bpf/config | 5 +
.../selftests/bpf/prog_tests/crypto_sanity.c | 215 ++++++++++++++++++
.../selftests/bpf/progs/crypto_common.h | 67 ++++++
.../selftests/bpf/progs/crypto_sanity.c | 192 ++++++++++++++++
4 files changed, 479 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
create mode 100644 tools/testing/selftests/bpf/progs/crypto_common.h
create mode 100644 tools/testing/selftests/bpf/progs/crypto_sanity.c

diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index c125c441abc7..2221994a36d6 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -13,7 +13,12 @@ CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_USER_API=y
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_AES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
new file mode 100644
index 000000000000..2dd73cb248be
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/in6.h>
+#include <linux/if_alg.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "crypto_sanity.skel.h"
+
+#define NS_TEST "crypto_sanity_ns"
+#define IPV6_IFACE_ADDR "face::1"
+#define UDP_TEST_PORT 7777
+static const unsigned char crypto_key[] = "testtest12345678";
+static const char plain_text[] = "stringtoencrypt0";
+static int opfd, tfmfd;
+
+int init_afalg(void)
+{
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher",
+ .salg_name = "ecb(aes)"
+ };
+
+ tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (tfmfd == -1)
+ return errno;
+ if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == -1)
+ return errno;
+ if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, crypto_key, 16) == -1)
+ return errno;
+ opfd = accept(tfmfd, NULL, 0);
+ if (opfd == -1)
+ return errno;
+ return 0;
+}
+
+void deinit_afalg(void)
+{
+ if (tfmfd)
+ close(tfmfd);
+ if (opfd)
+ close(opfd);
+}
+
+void do_crypt_afalg(const void *src, void *dst, int size, bool encrypt)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ char cbuf[CMSG_SPACE(4)] = {0};
+ struct iovec iov;
+
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_ALG;
+ cmsg->cmsg_type = ALG_SET_OP;
+ cmsg->cmsg_len = CMSG_LEN(4);
+ *(__u32 *)CMSG_DATA(cmsg) = encrypt ? ALG_OP_ENCRYPT : ALG_OP_DECRYPT;
+
+ iov.iov_base = (char *)src;
+ iov.iov_len = size;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ sendmsg(opfd, &msg, 0);
+ read(opfd, dst, size);
+}
+
+void test_crypto_sanity(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .repeat = 1,
+ );
+ struct nstoken *nstoken = NULL;
+ struct crypto_sanity *skel;
+ char afalg_plain[16] = {0};
+ char afalg_dst[16] = {0};
+ struct sockaddr_in6 addr;
+ int sockfd, err, pfd;
+ socklen_t addrlen;
+
+ skel = crypto_sanity__open();
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.crypto_accuire, true);
+
+ err = crypto_sanity__load(skel);
+ if (!ASSERT_ERR(err, "crypto_accuire unexpected load success"))
+ goto fail;
+
+ crypto_sanity__destroy(skel);
+
+ skel = crypto_sanity__open();
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.crypto_accuire, false);
+
+ SYS(fail, "ip netns add %s", NS_TEST);
+ SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
+ SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
+
+ err = crypto_sanity__load(skel);
+ if (!ASSERT_OK(err, "crypto_sanity__load"))
+ goto fail;
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ err = init_afalg();
+ if (!ASSERT_OK(err, "AF_ALG init fail"))
+ goto fail;
+
+ qdisc_hook.ifindex = if_nametoindex("lo");
+ if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
+ goto fail;
+
+ err = crypto_sanity__attach(skel);
+ if (!ASSERT_OK(err, "crypto_sanity__attach"))
+ goto fail;
+
+ pfd = bpf_program__fd(skel->progs.crypto_release);
+ if (!ASSERT_GT(pfd, 0, "crypto_release fd"))
+ goto fail;
+
+ err = bpf_prog_test_run_opts(pfd, &opts);
+ if (!ASSERT_OK(err, "crypto_release") ||
+ !ASSERT_OK(opts.retval, "crypto_release retval"))
+ goto fail;
+
+ pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
+ if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
+ goto fail;
+
+ err = bpf_prog_test_run_opts(pfd, &opts);
+ if (!ASSERT_OK(err, "skb_crypto_setup") ||
+ !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
+ goto fail;
+
+ if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
+ goto fail;
+
+ err = bpf_tc_hook_create(&qdisc_hook);
+ if (!ASSERT_OK(err, "create qdisc hook"))
+ goto fail;
+
+ addrlen = sizeof(addr);
+ err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
+ (void *)&addr, &addrlen);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto fail;
+
+ tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(err, "attach encrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
+ goto fail;
+ err = sendto(sockfd, plain_text, 16, 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, 16, "encrypt send"))
+ goto fail;
+
+ do_crypt_afalg(plain_text, afalg_dst, 16, true);
+
+ bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(skel->bss->status, "encrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, afalg_dst, sizeof(afalg_dst), "encrypt AF_ALG"))
+ goto fail;
+
+ tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
+ if (!ASSERT_OK(err, "attach decrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
+ goto fail;
+ err = sendto(sockfd, afalg_dst, 16, 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, 16, "decrypt send"))
+ goto fail;
+
+ do_crypt_afalg(afalg_dst, afalg_plain, 16, false);
+
+ bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
+ if (!ASSERT_OK(skel->bss->status, "decrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, afalg_plain, sizeof(afalg_plain), "decrypt AF_ALG"))
+ goto fail;
+
+fail:
+ if (nstoken) {
+ bpf_tc_hook_destroy(&qdisc_hook);
+ close_netns(nstoken);
+ }
+ deinit_afalg();
+ SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ crypto_sanity__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h b/tools/testing/selftests/bpf/progs/crypto_common.h
new file mode 100644
index 000000000000..260b9a0fb4ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_common.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CRYPTO_COMMON_H
+#define _CRYPTO_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+struct bpf_crypto_ctx *bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
+ const struct bpf_dynptr *pkey,
+ unsigned int authsize, int *err) __ksym;
+struct bpf_crypto_ctx *bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) __ksym;
+void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __ksym;
+int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ struct bpf_dynptr *dst, struct bpf_dynptr *iv) __ksym;
+int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ struct bpf_dynptr *dst, struct bpf_dynptr *iv) __ksym;
+
+struct __crypto_ctx_value {
+ struct bpf_crypto_ctx __kptr * ctx;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __crypto_ctx_value);
+ __uint(max_entries, 1);
+} __crypto_ctx_map SEC(".maps");
+
+static inline struct __crypto_ctx_value *crypto_ctx_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+}
+
+static inline int crypto_ctx_insert(struct bpf_crypto_ctx *ctx)
+{
+ struct __crypto_ctx_value local, *v;
+ struct bpf_crypto_ctx *old;
+ u32 key = 0;
+ int err;
+
+ local.ctx = NULL;
+ err = bpf_map_update_elem(&__crypto_ctx_map, &key, &local, 0);
+ if (err) {
+ bpf_crypto_ctx_release(ctx);
+ return err;
+ }
+
+ v = bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+ if (!v) {
+ bpf_crypto_ctx_release(ctx);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->ctx, ctx);
+ if (old) {
+ bpf_crypto_ctx_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CRYPTO_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
new file mode 100644
index 000000000000..f566ff189b7e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+#define UDP_TEST_PORT 7777
+unsigned char crypto_key[16] = "testtest12345678";
+char dst[32] = {};
+int status;
+
+static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
+{
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ u32 offset;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return -1;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return -1;
+
+ if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
+ return -1;
+
+ offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
+ if (skb->len < offset + 16)
+ return -1;
+
+ bpf_dynptr_from_skb(skb, 0, psrc);
+ bpf_dynptr_adjust(psrc, offset, offset + 16);
+
+ return 0;
+}
+
+SEC("fentry.s/bpf_fentry_test1")
+int BPF_PROG(skb_crypto_setup)
+{
+ struct bpf_crypto_ctx *cctx;
+ struct bpf_dynptr key = {};
+ int err = 0;
+
+ status = 0;
+
+ bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
+ cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+
+ return 0;
+}
+
+SEC("fentry.s/bpf_fentry_test1")
+int BPF_PROG(crypto_release)
+{
+ struct bpf_crypto_ctx *cctx;
+ struct bpf_dynptr key = {};
+ int err = 0;
+
+ status = 0;
+
+ bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
+ cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("Unreleased reference")
+int BPF_PROG(crypto_accuire)
+{
+ struct bpf_crypto_ctx *cctx;
+ struct bpf_dynptr key = {};
+ int err = 0;
+
+ status = 0;
+
+ bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
+ cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ cctx = bpf_crypto_ctx_acquire(cctx);
+ if (!cctx)
+ return -EINVAL;
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+SEC("tc")
+int decrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst, iv;
+ int err;
+
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ bpf_dynptr_from_mem(dst, 0, 0, &iv);
+
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
+
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int encrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst, iv;
+ int err;
+
+ status = 0;
+
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ bpf_dynptr_from_mem(dst, 0, 0, &iv);
+
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
+
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
--
2.39.3


2023-12-02 02:36:15

by Vadim Fedorenko

Subject: [PATCH bpf-next v7 2/3] bpf: crypto: add skcipher to bpf crypto

Implement skcipher crypto in BPF crypto framework.

Signed-off-by: Vadim Fedorenko <[email protected]>
---
v6 -> v7:
- style issues
v6:
- introduce new file
---
kernel/bpf/Makefile | 3 ++
kernel/bpf/crypto_skcipher.c | 76 ++++++++++++++++++++++++++++++++++++
2 files changed, 79 insertions(+)
create mode 100644 kernel/bpf/crypto_skcipher.c

diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index bcde762bb2c2..f4827bb72bee 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -43,6 +43,9 @@ obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
ifeq ($(CONFIG_CRYPTO),y)
obj-$(CONFIG_BPF_SYSCALL) += crypto.o
+ifeq ($(CONFIG_CRYPTO_SKCIPHER),y)
+obj-$(CONFIG_BPF_SYSCALL) += crypto_skcipher.o
+endif
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/

diff --git a/kernel/bpf/crypto_skcipher.c b/kernel/bpf/crypto_skcipher.c
new file mode 100644
index 000000000000..d036eb64c1e2
--- /dev/null
+++ b/kernel/bpf/crypto_skcipher.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Meta, Inc */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/bpf_crypto.h>
+#include <crypto/skcipher.h>
+
+static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo)
+{
+ return crypto_alloc_lskcipher(algo, 0, 0);
+}
+
+static void bpf_crypto_lskcipher_free_tfm(void *tfm)
+{
+ crypto_free_lskcipher(tfm);
+}
+
+static int bpf_crypto_lskcipher_has_algo(const char *algo)
+{
+ return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK);
+}
+
+static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen)
+{
+ return crypto_lskcipher_setkey(tfm, key, keylen);
+}
+
+static u32 bpf_crypto_lskcipher_get_flags(void *tfm)
+{
+ return crypto_lskcipher_get_flags(tfm);
+}
+
+static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm)
+{
+ return crypto_lskcipher_ivsize(tfm);
+}
+
+static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *iv)
+{
+ return crypto_lskcipher_encrypt(tfm, src, dst, len, iv);
+}
+
+static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst,
+ unsigned int len, u8 *iv)
+{
+ return crypto_lskcipher_decrypt(tfm, src, dst, len, iv);
+}
+
+static const struct bpf_crypto_type bpf_crypto_lskcipher_type = {
+ .alloc_tfm = bpf_crypto_lskcipher_alloc_tfm,
+ .free_tfm = bpf_crypto_lskcipher_free_tfm,
+ .has_algo = bpf_crypto_lskcipher_has_algo,
+ .setkey = bpf_crypto_lskcipher_setkey,
+ .encrypt = bpf_crypto_lskcipher_encrypt,
+ .decrypt = bpf_crypto_lskcipher_decrypt,
+ .ivsize = bpf_crypto_lskcipher_ivsize,
+ .get_flags = bpf_crypto_lskcipher_get_flags,
+ .owner = THIS_MODULE,
+ .name = "skcipher",
+};
+
+static int __init bpf_crypto_skcipher_init(void)
+{
+ return bpf_crypto_register_type(&bpf_crypto_lskcipher_type);
+}
+
+static void __exit bpf_crypto_skcipher_exit(void)
+{
+ int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type);
+ WARN_ON_ONCE(err);
+}
+
+module_init(bpf_crypto_skcipher_init);
+module_exit(bpf_crypto_skcipher_exit);
+MODULE_LICENSE("GPL");
--
2.39.3


2023-12-02 02:36:26

by Martin KaFai Lau

Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On 12/1/23 5:06 PM, Vadim Fedorenko wrote:
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index eb447b0a9423..0143ff6c93a1 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1228,6 +1228,7 @@ int bpf_dynptr_check_size(u32 size);
> u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
> const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
> void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
>
> #ifdef CONFIG_BPF_JIT
> int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
> diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
> new file mode 100644
> index 000000000000..e81bd8ab979c
> --- /dev/null
> +++ b/include/linux/bpf_crypto.h
> @@ -0,0 +1,23 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +#ifndef _BPF_CRYPTO_H
> +#define _BPF_CRYPTO_H
> +
> +struct bpf_crypto_type {
> + void *(*alloc_tfm)(const char *algo);
> + void (*free_tfm)(void *tfm);
> + int (*has_algo)(const char *algo);
> + int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
> + int (*setauthsize)(void *tfm, unsigned int authsize);
> + int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
> + int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
> + unsigned int (*ivsize)(void *tfm);
> + u32 (*get_flags)(void *tfm);
> + struct module *owner;
> + char name[14];

Does it have a macro (from crypto ?) that can be reused here instead of a
numeric constant?

> +};
> +
> +int bpf_crypto_register_type(const struct bpf_crypto_type *type);
> +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
> +
> +#endif /* _BPF_CRYPTO_H */
> diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
> index f526b7573e97..bcde762bb2c2 100644
> --- a/kernel/bpf/Makefile
> +++ b/kernel/bpf/Makefile
> @@ -41,6 +41,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
> obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
> obj-${CONFIG_BPF_LSM} += bpf_lsm.o
> endif
> +ifeq ($(CONFIG_CRYPTO),y)
> +obj-$(CONFIG_BPF_SYSCALL) += crypto.o
> +endif
> obj-$(CONFIG_BPF_PRELOAD) += preload/
>
> obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
> new file mode 100644
> index 000000000000..a1e543d1d7fe
> --- /dev/null
> +++ b/kernel/bpf/crypto.c
> @@ -0,0 +1,364 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright (c) 2023 Meta, Inc */
> +#include <linux/bpf.h>
> +#include <linux/bpf_crypto.h>
> +#include <linux/bpf_mem_alloc.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> +#include <linux/filter.h>
> +#include <linux/scatterlist.h>
> +#include <linux/skbuff.h>
> +#include <crypto/skcipher.h>
> +
> +struct bpf_crypto_type_list {
> + const struct bpf_crypto_type *type;
> + struct list_head list;
> +};
> +
> +static LIST_HEAD(bpf_crypto_types);
> +static DECLARE_RWSEM(bpf_crypto_types_sem);
> +
> +/**
> + * struct bpf_crypto_ctx - refcounted BPF crypto context structure
> + * @type: The pointer to bpf crypto type
> + * @tfm: The pointer to instance of crypto API struct.
> + * @rcu: The RCU head used to free the crypto context with RCU safety.
> + * @usage: Object reference counter. When the refcount goes to 0, the
> + * memory is released back to the BPF allocator, which provides
> + * RCU safety.
> + */
> +struct bpf_crypto_ctx {
> + const struct bpf_crypto_type *type;
> + void *tfm;
> + struct rcu_head rcu;
> + refcount_t usage;
> +};
> +
> +int bpf_crypto_register_type(const struct bpf_crypto_type *type)
> +{
> + struct bpf_crypto_type_list *node;
> + int err = -EEXIST;
> +
> + down_write(&bpf_crypto_types_sem);
> + list_for_each_entry(node, &bpf_crypto_types, list) {
> + if (!strcmp(node->type->name, type->name))
> + goto unlock;
> + }
> +
> + node = kmalloc(sizeof(*node), GFP_KERNEL);
> + err = -ENOMEM;
> + if (!node)
> + goto unlock;
> +
> + node->type = type;
> + list_add(&node->list, &bpf_crypto_types);
> + err = 0;
> +
> +unlock:
> + up_write(&bpf_crypto_types_sem);
> +
> + return err;
> +}
> +EXPORT_SYMBOL_GPL(bpf_crypto_register_type);
> +
> +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
> +{
> + struct bpf_crypto_type_list *node;
> + int err = -ENOENT;
> +
> + down_write(&bpf_crypto_types_sem);
> + list_for_each_entry(node, &bpf_crypto_types, list) {
> + if (strcmp(node->type->name, type->name))
> + continue;
> +
> + list_del(&node->list);
> + kfree(node);
> + err = 0;
> + break;
> + }
> + up_write(&bpf_crypto_types_sem);
> +
> + return err;
> +}
> +EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
> +
> +static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
> +{
> + const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
> + struct bpf_crypto_type_list *node;
> +
> + down_read(&bpf_crypto_types_sem);
> + list_for_each_entry(node, &bpf_crypto_types, list) {
> + if (strcmp(node->type->name, name))
> + continue;
> +
> + if (try_module_get(node->type->owner))

If I read patch 2 correctly, it is always built-in. I am not sure I understand
the module_put/get in this patch.

> + type = node->type;
> + break;
> + }
> + up_read(&bpf_crypto_types_sem);
> +
> + return type;
> +}
> +
> +__bpf_kfunc_start_defs();
> +
> +/**
> + * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
> + * As crypto API functions use GFP_KERNEL allocations, this function can
> + * only be used in sleepable BPF programs.
> + *
> + * bpf_crypto_ctx_create() allocates memory for crypto context.
> + * It may return NULL if no memory is available.
> + * @type__str: pointer to string representation of crypto type.
> + * @algo__str: pointer to string representation of algorithm.
> + * @pkey: bpf_dynptr which holds cipher key to do crypto.
> + * @err: integer to store error code when NULL is returned
> + */
> +__bpf_kfunc struct bpf_crypto_ctx *
> +bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
> + const struct bpf_dynptr_kern *pkey,
> + unsigned int authsize, int *err)
> +{
> + const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
> + struct bpf_crypto_ctx *ctx;
> + const u8 *key;
> + u32 key_len;
> +
> + type = bpf_crypto_get_type(type__str);
> + if (IS_ERR(type)) {
> + *err = PTR_ERR(type);
> + return NULL;
> + }
> +
> + if (!type->has_algo(algo__str)) {
> + *err = -EOPNOTSUPP;
> + goto err;

ctx is still not initialized. The error path will crash.
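
One way to avoid that (rough sketch, untested) would be to initialize ctx to
NULL at declaration and guard the free path:

        struct bpf_crypto_ctx *ctx = NULL;
        ...
        err:
                if (ctx && ctx->tfm)
                        type->free_tfm(ctx->tfm);
                kfree(ctx);
                module_put(type->owner);
                return NULL;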

> + }
> +
> + if (!authsize && type->setauthsize) {
> + *err = -EOPNOTSUPP;
> + goto err;
> + }
> +
> + if (authsize && !type->setauthsize) {
> + *err = -EOPNOTSUPP;
> + goto err;
> + }
> +
> + key_len = __bpf_dynptr_size(pkey);
> + if (!key_len) {
> + *err = -EINVAL;
> + goto err;
> + }
> + key = __bpf_dynptr_data(pkey, key_len);
> + if (!key) {
> + *err = -EINVAL;
> + goto err;
> + }
> +
> + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> + if (!ctx) {
> + *err = -ENOMEM;
> + goto err;
> + }
> +
> + ctx->type = type;
> + ctx->tfm = type->alloc_tfm(algo__str);
> + if (IS_ERR(ctx->tfm)) {
> + *err = PTR_ERR(ctx->tfm);
> + ctx->tfm = NULL;
> + goto err;
> + }
> +
> + if (authsize) {
> + *err = type->setauthsize(ctx->tfm, authsize);
> + if (*err)
> + goto err;
> + }
> +
> + *err = type->setkey(ctx->tfm, key, key_len);
> + if (*err)
> + goto err;
> +
> + refcount_set(&ctx->usage, 1);
> +
> + return ctx;
> +err:
> + if (ctx->tfm)
> + type->free_tfm(ctx->tfm);
> + kfree(ctx);
> + module_put(type->owner);
> +
> + return NULL;
> +}
> +
> +static void crypto_free_cb(struct rcu_head *head)
> +{
> + struct bpf_crypto_ctx *ctx;
> +
> + ctx = container_of(head, struct bpf_crypto_ctx, rcu);
> + ctx->type->free_tfm(ctx->tfm);
> + module_put(ctx->type->owner);
> + kfree(ctx);
> +}
> +
> +/**
> + * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
> + * pointer.
> + *
> + * Acquires a reference to a BPF crypto context. The context returned by this function
> + * must either be embedded in a map as a kptr, or freed with
> + * bpf_crypto_skcipher_ctx_release().
> + */
> +__bpf_kfunc struct bpf_crypto_ctx *
> +bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
> +{
> + refcount_inc(&ctx->usage);
> + return ctx;
> +}
> +
> +/**
> + * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
> + * @ctx: The crypto context being released.
> + *
> + * Releases a previously acquired reference to a BPF crypto context. When the final
> + * reference of the BPF crypto context has been released, it is subsequently freed in
> + * an RCU callback in the BPF memory allocator.
> + */
> +__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
> +{
> + if (refcount_dec_and_test(&ctx->usage))
> + call_rcu(&ctx->rcu, crypto_free_cb);
> +}
> +
> +static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + const struct bpf_dynptr_kern *iv,
> + bool decrypt)
> +{
> + u32 src_len, dst_len, iv_len;
> + const u8 *psrc;
> + u8 *pdst, *piv;
> + int err;
> +
> + if (ctx->type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY)
> + return -EINVAL;
> +
> + if (__bpf_dynptr_is_rdonly(dst))
> + return -EINVAL;
> +
> + iv_len = __bpf_dynptr_size(iv);
> + src_len = __bpf_dynptr_size(src);
> + dst_len = __bpf_dynptr_size(dst);
> + if (!src_len || !dst_len)
> + return -EINVAL;
> +
> + if (iv_len != ctx->type->ivsize(ctx->tfm))
> + return -EINVAL;
> +
> + psrc = __bpf_dynptr_data(src, src_len);
> + if (!psrc)
> + return -EINVAL;
> + pdst = __bpf_dynptr_data_rw(dst, dst_len);
> + if (!pdst)
> + return -EINVAL;
> +
> + piv = iv_len ? __bpf_dynptr_data_rw(iv, iv_len) : NULL;
> + if (iv_len && !piv)
> + return -EINVAL;
> +
> + err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
> + : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);
> +
> + return err;
> +}
> +
> +/**
> + * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
> + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
> + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
> + * @iv: bpf_dynptr to IV data to be used by decryptor.
> + *
> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + struct bpf_dynptr_kern *iv)
> +{
> + return bpf_crypto_crypt(ctx, src, dst, iv, true);
> +}
> +
> +/**
> + * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
> + * @ctx: The crypto context being used. The ctx must be a trusted pointer.
> + * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
> + * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
> + * @iv: bpf_dynptr to IV data to be used by decryptor.
> + *
> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
> + */
> +__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
> + const struct bpf_dynptr_kern *src,
> + struct bpf_dynptr_kern *dst,
> + struct bpf_dynptr_kern *iv)
> +{
> + return bpf_crypto_crypt(ctx, src, dst, iv, false);
> +}
> +
> +__bpf_kfunc_end_defs();
> +
> +BTF_SET8_START(crypt_init_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
> +BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
> +BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)

Considering bpf_crypto_ctx is rcu protected, the acquire may use "KF_ACQUIRE |
KF_RCU | KF_RET_NULL" such that the bpf_crypto_ctx_acquire(ctx_from_map_value)
will work and the user will be prepared to check for NULL from day one.
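
i.e. something along these lines (only a sketch):

        BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)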

> +BTF_SET8_END(crypt_init_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_init_kfunc_set = {
> + .owner = THIS_MODULE,
> + .set = &crypt_init_kfunc_btf_ids,
> +};
> +
> +BTF_SET8_START(crypt_kfunc_btf_ids)
> +BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU)
> +BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU)
> +BTF_SET8_END(crypt_kfunc_btf_ids)
> +
> +static const struct btf_kfunc_id_set crypt_kfunc_set = {
> + .owner = THIS_MODULE,
> + .set = &crypt_kfunc_btf_ids,
> +};
> +
> +BTF_ID_LIST(bpf_crypto_dtor_ids)
> +BTF_ID(struct, bpf_crypto_ctx)
> +BTF_ID(func, bpf_crypto_ctx_release)
> +
> +static int __init crypto_kfunc_init(void)
> +{
> + int ret;
> + const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = {
> + {
> + .btf_id = bpf_crypto_dtor_ids[0],
> + .kfunc_btf_id = bpf_crypto_dtor_ids[1]
> + },
> + };
> +
> + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set);
> + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
> + &crypt_init_kfunc_set);
> + return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors,
> + ARRAY_SIZE(bpf_crypto_dtors),
> + THIS_MODULE);
> +}
> +
> +late_initcall(crypto_kfunc_init);
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index b45a8381f9bd..b73314c0124e 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -1436,7 +1436,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
> #define DYNPTR_SIZE_MASK 0xFFFFFF
> #define DYNPTR_RDONLY_BIT BIT(31)
>
> -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
> {
> return ptr->size & DYNPTR_RDONLY_BIT;
> }
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 8e7b6072e3f4..c54716966d5d 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -5162,6 +5162,7 @@ BTF_ID(struct, cgroup)
> #endif
> BTF_ID(struct, bpf_cpumask)
> BTF_ID(struct, task_struct)
> +BTF_ID(struct, bpf_crypto_ctx)
> BTF_SET_END(rcu_protected_types)
>
> static bool rcu_protected_object(const struct btf *btf, u32 btf_id)


2023-12-02 04:33:07

by Herbert Xu

Subject: Re: [PATCH bpf-next v7 2/3] bpf: crypto: add skcipher to bpf crypto

On Fri, Dec 01, 2023 at 05:06:03PM -0800, Vadim Fedorenko wrote:
>
> +static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
> + unsigned int len, u8 *iv)
> +{
> + return crypto_lskcipher_encrypt(tfm, src, dst, len, iv);
> +}

Please note that the API has been updated and the iv field is now
the siv. For algorithms with a non-zero statesize, that means that
the IV must be followed by enough memory to store the internal state,
i.e., crypto_lskcipher_statesize(tfm).
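
A rough sketch of what that could mean for the wrapper (untested, just to
illustrate the sizing; the extra state bytes would have to be provided by
the caller's IV dynptr):

        static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm)
        {
                return crypto_lskcipher_ivsize(tfm) +
                       crypto_lskcipher_statesize(tfm);
        }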

Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2023-12-03 12:31:43

by Simon Horman

Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On Fri, Dec 01, 2023 at 05:06:02PM -0800, Vadim Fedorenko wrote:
> Add crypto API support to BPF to be able to decrypt or encrypt packets
> in TC/XDP BPF programs. Special care should be taken for the initialization
> part of the crypto algo because crypto allocation doesn't work with preemption
> disabled; it can only be run in a sleepable BPF program. Also async crypto
> is not supported because of the very same issue - TC/XDP BPF programs
> are not sleepable.
>
> Signed-off-by: Vadim Fedorenko <[email protected]>

...

> +/**
> + * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
> + *
> + * Allocates a crypto context that can be used, acquired, and released by
> + * a BPF program. The crypto context returned by this function must either
> + * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
> + * As crypto API functions use GFP_KERNEL allocations, this function can
> + * only be used in sleepable BPF programs.
> + *
> + * bpf_crypto_ctx_create() allocates memory for crypto context.
> + * It may return NULL if no memory is available.
> + * @type__str: pointer to string representation of crypto type.
> + * @algo__str: pointer to string representation of algorithm.
> + * @pkey: bpf_dynptr which holds cipher key to do crypto.

Hi Vadim,

a minor nit from my side: something about @authsize should go here.
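For example (wording is only a suggestion):

 * @authsize: authentication tag size for AEAD algorithms; must be 0 for
 *            plain ciphers.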

> + * @err: integer to store error code when NULL is returned
> + */
> +__bpf_kfunc struct bpf_crypto_ctx *
> +bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
> + const struct bpf_dynptr_kern *pkey,
> + unsigned int authsize, int *err)

...

2023-12-03 12:31:53

by Simon Horman

Subject: Re: [PATCH bpf-next v7 3/3] selftests: bpf: crypto skcipher algo selftests

On Fri, Dec 01, 2023 at 05:06:04PM -0800, Vadim Fedorenko wrote:
> Add simple tc hook selftests to show the way to work with new crypto
> BPF API. Some weird structre and map are added to setup program to make

Hi Vadim,

as it looks like there will be a new revision of this series,
please consider updating the spelling of structure.

> verifier happy about dynptr initialization from memory. Simple AES-ECB
> algo is used to demonstrate encryption and decryption of fixed size
> buffers.
>
> Signed-off-by: Vadim Fedorenko <[email protected]>

...

2023-12-03 20:33:18

by Vadim Fedorenko

Subject: Re: [PATCH bpf-next v7 3/3] selftests: bpf: crypto skcipher algo selftests

On 03.12.2023 10:59, Simon Horman wrote:
> On Fri, Dec 01, 2023 at 05:06:04PM -0800, Vadim Fedorenko wrote:
>> Add simple tc hook selftests to show the way to work with new crypto
>> BPF API. Some weird structre and map are added to setup program to make
>
> Hi Vadim,
>
> as it looks like there will be a new revision of this series,
> please consider updating the spelling of structure.
>

Hi Simon!

Thanks for pointing it out. Actually, with the alignment fixes in the BPF UAPI
this sentence is no longer relevant and the test program has been rewritten
without hacks. I'll remove it from the commit message in the new revision.

>> verifier happy about dynptr initialization from memory. Simple AES-ECB
>> algo is used to demonstrate encryption and decryption of fixed size
>> buffers.
>>
>> Signed-off-by: Vadim Fedorenko <[email protected]>
>
> ...


2023-12-03 20:33:29

by Vadim Fedorenko

Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On 02.12.2023 01:48, Martin KaFai Lau wrote:
> On 12/1/23 5:06 PM, Vadim Fedorenko wrote:
>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>> index eb447b0a9423..0143ff6c93a1 100644
>> --- a/include/linux/bpf.h
>> +++ b/include/linux/bpf.h
>> @@ -1228,6 +1228,7 @@ int bpf_dynptr_check_size(u32 size);
>>   u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
>>   const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
>>   void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
>> +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
>>   #ifdef CONFIG_BPF_JIT
>>   int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct
>> bpf_trampoline *tr);
>> diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
>> new file mode 100644
>> index 000000000000..e81bd8ab979c
>> --- /dev/null
>> +++ b/include/linux/bpf_crypto.h
>> @@ -0,0 +1,23 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
>> +#ifndef _BPF_CRYPTO_H
>> +#define _BPF_CRYPTO_H
>> +
>> +struct bpf_crypto_type {
>> +    void *(*alloc_tfm)(const char *algo);
>> +    void (*free_tfm)(void *tfm);
>> +    int (*has_algo)(const char *algo);
>> +    int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
>> +    int (*setauthsize)(void *tfm, unsigned int authsize);
>> +    int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
>> +    int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
>> +    unsigned int (*ivsize)(void *tfm);
>> +    u32 (*get_flags)(void *tfm);
>> +    struct module *owner;
>> +    char name[14];
>
> Does it have a macro (from crypto ?) that can be reused here instead of a
> numeric constant?
>
I have checked AF_ALG, which uses the same approach, and unfortunately there
is no constant:
https://elixir.bootlin.com/linux/v6.7-rc3/source/include/crypto/if_alg.h#L55

Maybe Herbert will suggest some constant instead?
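
For reference, the relevant tail of that struct (abridged):

        struct af_alg_type {
                ...
                struct module *owner;
                char name[14];
        };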

>> +};
>> +
>> +int bpf_crypto_register_type(const struct bpf_crypto_type *type);
>> +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
>> +
>> +#endif /* _BPF_CRYPTO_H */
>> diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
>> index f526b7573e97..bcde762bb2c2 100644
>> --- a/kernel/bpf/Makefile
>> +++ b/kernel/bpf/Makefile
>> @@ -41,6 +41,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
>>   obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
>>   obj-${CONFIG_BPF_LSM} += bpf_lsm.o
>>   endif
>> +ifeq ($(CONFIG_CRYPTO),y)
>> +obj-$(CONFIG_BPF_SYSCALL) += crypto.o
>> +endif
>>   obj-$(CONFIG_BPF_PRELOAD) += preload/
>>   obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
>> diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
>> new file mode 100644
>> index 000000000000..a1e543d1d7fe
>> --- /dev/null
>> +++ b/kernel/bpf/crypto.c
>> @@ -0,0 +1,364 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/* Copyright (c) 2023 Meta, Inc */
>> +#include <linux/bpf.h>
>> +#include <linux/bpf_crypto.h>
>> +#include <linux/bpf_mem_alloc.h>
>> +#include <linux/btf.h>
>> +#include <linux/btf_ids.h>
>> +#include <linux/filter.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/skbuff.h>
>> +#include <crypto/skcipher.h>
>> +
>> +struct bpf_crypto_type_list {
>> +    const struct bpf_crypto_type *type;
>> +    struct list_head list;
>> +};
>> +
>> +static LIST_HEAD(bpf_crypto_types);
>> +static DECLARE_RWSEM(bpf_crypto_types_sem);
>> +
>> +/**
>> + * struct bpf_crypto_ctx - refcounted BPF crypto context structure
>> + * @type:    The pointer to bpf crypto type
>> + * @tfm:    The pointer to instance of crypto API struct.
>> + * @rcu:    The RCU head used to free the crypto context with RCU safety.
>> + * @usage:    Object reference counter. When the refcount goes to 0, the
>> + *        memory is released back to the BPF allocator, which provides
>> + *        RCU safety.
>> + */
>> +struct bpf_crypto_ctx {
>> +    const struct bpf_crypto_type *type;
>> +    void *tfm;
>> +    struct rcu_head rcu;
>> +    refcount_t usage;
>> +};
>> +
>> +int bpf_crypto_register_type(const struct bpf_crypto_type *type)
>> +{
>> +    struct bpf_crypto_type_list *node;
>> +    int err = -EEXIST;
>> +
>> +    down_write(&bpf_crypto_types_sem);
>> +    list_for_each_entry(node, &bpf_crypto_types, list) {
>> +        if (!strcmp(node->type->name, type->name))
>> +            goto unlock;
>> +    }
>> +
>> +    node = kmalloc(sizeof(*node), GFP_KERNEL);
>> +    err = -ENOMEM;
>> +    if (!node)
>> +        goto unlock;
>> +
>> +    node->type = type;
>> +    list_add(&node->list, &bpf_crypto_types);
>> +    err = 0;
>> +
>> +unlock:
>> +    up_write(&bpf_crypto_types_sem);
>> +
>> +    return err;
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_crypto_register_type);
>> +
>> +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
>> +{
>> +    struct bpf_crypto_type_list *node;
>> +    int err = -ENOENT;
>> +
>> +    down_write(&bpf_crypto_types_sem);
>> +    list_for_each_entry(node, &bpf_crypto_types, list) {
>> +        if (strcmp(node->type->name, type->name))
>> +            continue;
>> +
>> +        list_del(&node->list);
>> +        kfree(node);
>> +        err = 0;
>> +        break;
>> +    }
>> +    up_write(&bpf_crypto_types_sem);
>> +
>> +    return err;
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
>> +
>> +static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
>> +{
>> +    const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
>> +    struct bpf_crypto_type_list *node;
>> +
>> +    down_read(&bpf_crypto_types_sem);
>> +    list_for_each_entry(node, &bpf_crypto_types, list) {
>> +        if (strcmp(node->type->name, name))
>> +            continue;
>> +
>> +        if (try_module_get(node->type->owner))
>
> If I read patch 2 correctly, it is always built-in. I am not sure I understand
> the module_put/get in this patch.

Well, yeah, right now it's built-in, but it can easily be converted to a module
with its own Kconfig option, especially if we think about adding AEAD crypto
and using BPF in embedded setups with fewer resources.

>> +            type = node->type;
>> +        break;
>> +    }
>> +    up_read(&bpf_crypto_types_sem);
>> +
>> +    return type;
>> +}
>> +
>> +__bpf_kfunc_start_defs();
>> +
>> +/**
>> + * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and released by
>> + * a BPF program. The crypto context returned by this function must either
>> + * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
>> + * As crypto API functions use GFP_KERNEL allocations, this function can
>> + * only be used in sleepable BPF programs.
>> + *
>> + * bpf_crypto_ctx_create() allocates memory for crypto context.
>> + * It may return NULL if no memory is available.
>> + * @type__str: pointer to string representation of crypto type.
>> + * @algo__str: pointer to string representation of algorithm.
>> + * @pkey:      bpf_dynptr which holds cipher key to do crypto.
>> + * @err:       integer to store error code when NULL is returned
>> + */
>> +__bpf_kfunc struct bpf_crypto_ctx *
>> +bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
>> +              const struct bpf_dynptr_kern *pkey,
>> +              unsigned int authsize, int *err)
>> +{
>> +    const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
>> +    struct bpf_crypto_ctx *ctx;
>> +    const u8 *key;
>> +    u32 key_len;
>> +
>> +    type = bpf_crypto_get_type(type__str);
>> +    if (IS_ERR(type)) {
>> +        *err = PTR_ERR(type);
>> +        return NULL;
>> +    }
>> +
>> +    if (!type->has_algo(algo__str)) {
>> +        *err = -EOPNOTSUPP;
>> +        goto err;
>
> ctx is still not initialized. The error path will crash.

good catch, thanks!

>> +    }
>> +
>> +    if (!authsize && type->setauthsize) {
>> +        *err = -EOPNOTSUPP;
>> +        goto err;
>> +    }
>> +
>> +    if (authsize && !type->setauthsize) {
>> +        *err = -EOPNOTSUPP;
>> +        goto err;
>> +    }
>> +
>> +    key_len = __bpf_dynptr_size(pkey);
>> +    if (!key_len) {
>> +        *err = -EINVAL;
>> +        goto err;
>> +    }
>> +    key = __bpf_dynptr_data(pkey, key_len);
>> +    if (!key) {
>> +        *err = -EINVAL;
>> +        goto err;
>> +    }
>> +
>> +    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
>> +    if (!ctx) {
>> +        *err = -ENOMEM;
>> +        goto err;
>> +    }
>> +
>> +    ctx->type = type;
>> +    ctx->tfm = type->alloc_tfm(algo__str);
>> +    if (IS_ERR(ctx->tfm)) {
>> +        *err = PTR_ERR(ctx->tfm);
>> +        ctx->tfm = NULL;
>> +        goto err;
>> +    }
>> +
>> +    if (authsize) {
>> +        *err = type->setauthsize(ctx->tfm, authsize);
>> +        if (*err)
>> +            goto err;
>> +    }
>> +
>> +    *err = type->setkey(ctx->tfm, key, key_len);
>> +    if (*err)
>> +        goto err;
>> +
>> +    refcount_set(&ctx->usage, 1);
>> +
>> +    return ctx;
>> +err:
>> +    if (ctx->tfm)
>> +        type->free_tfm(ctx->tfm);
>> +    kfree(ctx);
>> +    module_put(type->owner);
>> +
>> +    return NULL;
>> +}
>> +
>> +static void crypto_free_cb(struct rcu_head *head)
>> +{
>> +    struct bpf_crypto_ctx *ctx;
>> +
>> +    ctx = container_of(head, struct bpf_crypto_ctx, rcu);
>> +    ctx->type->free_tfm(ctx->tfm);
>> +    module_put(ctx->type->owner);
>> +    kfree(ctx);
>> +}
>> +
>> +/**
>> + * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
>> + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
>> + *         pointer.
>> + *
>> + * Acquires a reference to a BPF crypto context. The context returned by this function
>> + * must either be embedded in a map as a kptr, or freed with
>> + * bpf_crypto_skcipher_ctx_release().
>> + */
>> +__bpf_kfunc struct bpf_crypto_ctx *
>> +bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
>> +{
>> +    refcount_inc(&ctx->usage);
>> +    return ctx;
>> +}
>> +
>> +/**
>> + * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
>> + * @ctx: The crypto context being released.
>> + *
>> + * Releases a previously acquired reference to a BPF crypto context. When the final
>> + * reference of the BPF crypto context has been released, it is subsequently freed in
>> + * an RCU callback in the BPF memory allocator.
>> + */
>> +__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
>> +{
>> +    if (refcount_dec_and_test(&ctx->usage))
>> +        call_rcu(&ctx->rcu, crypto_free_cb);
>> +}
>> +
>> +static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
>> +                const struct bpf_dynptr_kern *src,
>> +                struct bpf_dynptr_kern *dst,
>> +                const struct bpf_dynptr_kern *iv,
>> +                bool decrypt)
>> +{
>> +    u32 src_len, dst_len, iv_len;
>> +    const u8 *psrc;
>> +    u8 *pdst, *piv;
>> +    int err;
>> +
>> +    if (ctx->type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY)
>> +        return -EINVAL;
>> +
>> +    if (__bpf_dynptr_is_rdonly(dst))
>> +        return -EINVAL;
>> +
>> +    iv_len = __bpf_dynptr_size(iv);
>> +    src_len = __bpf_dynptr_size(src);
>> +    dst_len = __bpf_dynptr_size(dst);
>> +    if (!src_len || !dst_len)
>> +        return -EINVAL;
>> +
>> +    if (iv_len != ctx->type->ivsize(ctx->tfm))
>> +        return -EINVAL;
>> +
>> +    psrc = __bpf_dynptr_data(src, src_len);
>> +    if (!psrc)
>> +        return -EINVAL;
>> +    pdst = __bpf_dynptr_data_rw(dst, dst_len);
>> +    if (!pdst)
>> +        return -EINVAL;
>> +
>> +    piv = iv_len ? __bpf_dynptr_data_rw(iv, iv_len) : NULL;
>> +    if (iv_len && !piv)
>> +        return -EINVAL;
>> +
>> +    err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
>> +              : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);
>> +
>> +    return err;
>> +}
>> +
>> +/**
>> + * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
>> + * @ctx:    The crypto context being used. The ctx must be a trusted pointer.
>> + * @src:    bpf_dynptr to the encrypted data. Must be a trusted pointer.
>> + * @dst:    bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
>> + * @iv:        bpf_dynptr to IV data to be used by decryptor.
>> + *
>> + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
>> +                   const struct bpf_dynptr_kern *src,
>> +                   struct bpf_dynptr_kern *dst,
>> +                   struct bpf_dynptr_kern *iv)
>> +{
>> +    return bpf_crypto_crypt(ctx, src, dst, iv, true);
>> +}
>> +
>> +/**
>> + * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
>> + * @ctx:    The crypto context being used. The ctx must be a trusted pointer.
>> + * @src:    bpf_dynptr to the plain data. Must be a trusted pointer.
>> + * @dst:    bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
>> + * @iv:        bpf_dynptr to IV data to be used by decryptor.
>> + *
>> + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
>> + */
>> +__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
>> +                   const struct bpf_dynptr_kern *src,
>> +                   struct bpf_dynptr_kern *dst,
>> +                   struct bpf_dynptr_kern *iv)
>> +{
>> +    return bpf_crypto_crypt(ctx, src, dst, iv, false);
>> +}
>> +
>> +__bpf_kfunc_end_defs();
>> +
>> +BTF_SET8_START(crypt_init_kfunc_btf_ids)
>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
>
> Considering bpf_crypto_ctx is RCU protected, the acquire may use "KF_ACQUIRE |
> KF_RCU | KF_RET_NULL" such that bpf_crypto_ctx_acquire(ctx_from_map_value)
> will work and the user will be prepared to check for NULL from day one.
>

Got it. What about create? Should it also include KF_RCU?
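
For illustration, a minimal sketch of what an RCU-friendly acquire could look
like if the KF_RCU | KF_RET_NULL suggestion is taken (this is not the posted
code; using refcount_inc_not_zero() here is an assumption):

__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
{
	/* ctx may be an RCU-protected map value whose refcount has
	 * already dropped to zero, so the increment can fail. */
	if (!refcount_inc_not_zero(&ctx->usage))
		return NULL;
	return ctx;
}

BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_RCU | KF_ACQUIRE | KF_RET_NULL)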


2023-12-03 20:33:38

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On 03.12.2023 10:57, Simon Horman wrote:
> On Fri, Dec 01, 2023 at 05:06:02PM -0800, Vadim Fedorenko wrote:
>> Add crypto API support to BPF to be able to decrypt or encrypt packets
>> in TC/XDP BPF programs. Special care should be taken for initialization
>> part of crypto algo because crypto alloc) doesn't work with preemtion
>> disabled, it can be run only in sleepable BPF program. Also async crypto
>> is not supported because of the very same issue - TC/XDP BPF programs
>> are not sleepable.
>>
>> Signed-off-by: Vadim Fedorenko <[email protected]>
>
> ...
>
>> +/**
>> + * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
>> + *
>> + * Allocates a crypto context that can be used, acquired, and released by
>> + * a BPF program. The crypto context returned by this function must either
>> + * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
>> + * As crypto API functions use GFP_KERNEL allocations, this function can
>> + * only be used in sleepable BPF programs.
>> + *
>> + * bpf_crypto_ctx_create() allocates memory for crypto context.
>> + * It may return NULL if no memory is available.
>> + * @type__str: pointer to string representation of crypto type.
>> + * @algo__str: pointer to string representation of algorithm.
>> + * @pkey: bpf_dynptr which holds cipher key to do crypto.
>
> Hi Vadim,
>
> a minor nit from my side: something about @authsize should go here.
>
Hi Simon!

Good catch, I'll definitely add a description for authsize, thanks!

>> + * @err: integer to store error code when NULL is returned
>> + */
>> +__bpf_kfunc struct bpf_crypto_ctx *
>> +bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
>> + const struct bpf_dynptr_kern *pkey,
>> + unsigned int authsize, int *err)
>
> ...


2023-12-03 20:33:47

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 2/3] bpf: crypto: add skcipher to bpf crypto

On 02.12.2023 03:52, Herbert Xu wrote:
> On Fri, Dec 01, 2023 at 05:06:03PM -0800, Vadim Fedorenko wrote:
>>
>> +static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
>> + unsigned int len, u8 *iv)
>> +{
>> + return crypto_lskcipher_encrypt(tfm, src, dst, len, iv);
>> +}
>
> Please note that the API has been updated and the iv field is now
> the siv. For algorithms with a non-zero statesize, that means that
> the IV must be followed by enough memory to store the internal state,
> i.e., crypto_lskcipher_statesize(tfm).
>
> Thanks,

Hi Herbert!

Thanks for the reminder. I have read v3 of your patchset and AFAIU only arc4
is affected right now. All other algorithms still have statesize=0, so they
should work without any changes. I'll make a TODO note for myself to add a
state size check to the bpf_crypto part once the different trees are merged
during the merge window.

Am I right that this only affects skcipher and that AEAD crypto will not be changed?

Thanks,
Vadim
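
For illustration, a rough sketch of the kind of IV length check that could be
added once the lskcipher siv/state change lands. Note that the posted struct
bpf_crypto_type has no statesize() callback, so both the callback and this
helper are assumptions:

/* Hypothetical helper; assumes a statesize() op is added to
 * struct bpf_crypto_type next to ivsize(). */
static int bpf_crypto_check_iv_len(const struct bpf_crypto_ctx *ctx, u32 iv_len)
{
	u32 expected = ctx->type->ivsize(ctx->tfm);

	if (ctx->type->statesize)
		expected += ctx->type->statesize(ctx->tfm);

	return iv_len == expected ? 0 : -EINVAL;
}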

2023-12-05 00:34:09

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On 12/3/23 11:02 AM, Vadim Fedorenko wrote:
>>> +static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
>>> +{
>>> +    const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
>>> +    struct bpf_crypto_type_list *node;
>>> +
>>> +    down_read(&bpf_crypto_types_sem);
>>> +    list_for_each_entry(node, &bpf_crypto_types, list) {
>>> +        if (strcmp(node->type->name, name))
>>> +            continue;
>>> +
>>> +        if (try_module_get(node->type->owner))
>>
>> If I read patch 2 correctly, it is always built-in. I am not sure I understand
>> the module_put/get in this patch.
>
> Well, yeah, right now it's built-in, but it can easily be converted to a module
> with its own Kconfig option, especially if we think about adding AEAD crypto
> and using BPF in embedded setups with fewer resources.

What code is missing to support a module? It sounds like all the code is ready.
And does it really need a separate Kconfig option? Can it just depend on
CONFIG_BPF_SYSCALL and CONFIG_CRYPTO_SKCIPHER?
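
For illustration only, registration from a module would roughly look like the
sketch below, reusing the register/unregister API from patch 1. The
bpf_crypto_lskcipher_type name is a guess, not taken from the posted patch:

#include <linux/module.h>
#include <linux/bpf_crypto.h>

/* ops table as defined by the skcipher plug-in (name assumed) */
extern const struct bpf_crypto_type bpf_crypto_lskcipher_type;

static int __init bpf_crypto_skcipher_init(void)
{
	return bpf_crypto_register_type(&bpf_crypto_lskcipher_type);
}

static void __exit bpf_crypto_skcipher_exit(void)
{
	bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type);
}

module_init(bpf_crypto_skcipher_init);
module_exit(bpf_crypto_skcipher_exit);
MODULE_LICENSE("GPL");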

>>> +BTF_SET8_START(crypt_init_kfunc_btf_ids)
>>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
>>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
>>> +BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
>>
>> Considering bpf_crypto_ctx is RCU protected, the acquire may use "KF_ACQUIRE |
>> KF_RCU | KF_RET_NULL" such that bpf_crypto_ctx_acquire(ctx_from_map_value)
>> will work and the user will be prepared to check for NULL from day one.
>>
>
> Got it. What about create? Should it also include KF_RCU?

create should not need KF_RCU. The return value is a trusted/refcounted pointer.
It has a reg->ref_obj_id => is_trusted_reg().

2023-12-05 02:42:04

by Martin KaFai Lau

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 3/3] selftests: bpf: crypto skcipher algo selftests

On 12/1/23 5:06 PM, Vadim Fedorenko wrote:
> Add simple tc hook selftests to show the way to work with new crypto
> BPF API. Some weird structure and map are added to the setup program to make
> verifier happy about dynptr initialization from memory. Simple AES-ECB
> algo is used to demonstrate encryption and decryption of fixed size
> buffers.
>
> Signed-off-by: Vadim Fedorenko <[email protected]>
> ---
> v6 -> v7:
> - style issues
> v5 -> v6:
> - use AF_ALG socket to confirm proper algorithm test
> - adjust test kernel config to include AF_ALG
> v4 -> v5:
> - adjust selftests to use new naming
> - restore tests on aarch64 and s390 as no sg lists are used
> v3 -> v4:
> - adjust selftests to use new syntax of helpers
> - add tests for acquire and release
> v2 -> v3:
> - disable tests on s390 and aarch64 because of unknown Fatal exception
> in sg_init_one
> v1 -> v2:
> - add CONFIG_CRYPTO_AES and CONFIG_CRYPTO_ECB to selftest build config
> suggested by Daniel
> ---
> tools/testing/selftests/bpf/config | 5 +
> .../selftests/bpf/prog_tests/crypto_sanity.c | 215 ++++++++++++++++++
> .../selftests/bpf/progs/crypto_common.h | 67 ++++++
> .../selftests/bpf/progs/crypto_sanity.c | 192 ++++++++++++++++
> 4 files changed, 479 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> create mode 100644 tools/testing/selftests/bpf/progs/crypto_common.h
> create mode 100644 tools/testing/selftests/bpf/progs/crypto_sanity.c
>
> diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
> index c125c441abc7..2221994a36d6 100644
> --- a/tools/testing/selftests/bpf/config
> +++ b/tools/testing/selftests/bpf/config
> @@ -13,7 +13,12 @@ CONFIG_BPF_SYSCALL=y
> CONFIG_CGROUP_BPF=y
> CONFIG_CRYPTO_HMAC=y
> CONFIG_CRYPTO_SHA256=y
> +CONFIG_CRYPTO_USER_API=y
> CONFIG_CRYPTO_USER_API_HASH=y
> +CONFIG_CRYPTO_USER_API_SKCIPHER=y
> +CONFIG_CRYPTO_SKCIPHER=y
> +CONFIG_CRYPTO_ECB=y
> +CONFIG_CRYPTO_AES=y
> CONFIG_DEBUG_INFO=y
> CONFIG_DEBUG_INFO_BTF=y
> CONFIG_DEBUG_INFO_DWARF4=y
> diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> new file mode 100644
> index 000000000000..2dd73cb248be
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
> @@ -0,0 +1,215 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <net/if.h>
> +#include <linux/in6.h>
> +#include <linux/if_alg.h>
> +
> +#include "test_progs.h"
> +#include "network_helpers.h"
> +#include "crypto_sanity.skel.h"
> +
> +#define NS_TEST "crypto_sanity_ns"
> +#define IPV6_IFACE_ADDR "face::1"
> +#define UDP_TEST_PORT 7777
> +static const unsigned char crypto_key[] = "testtest12345678";
> +static const char plain_text[] = "stringtoencrypt0";
> +static int opfd, tfmfd;

nit. init to -1 and test for -1.
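
Something along these lines, perhaps (a sketch of the suggestion, not the
posted code):

static int opfd = -1, tfmfd = -1;

static void deinit_afalg(void)
{
	if (tfmfd >= 0)
		close(tfmfd);
	if (opfd >= 0)
		close(opfd);
}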

> +
> +int init_afalg(void)

static

> +{
> + struct sockaddr_alg sa = {
> + .salg_family = AF_ALG,
> + .salg_type = "skcipher",
> + .salg_name = "ecb(aes)"
> + };
> +
> + tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
> + if (tfmfd == -1)
> + return errno;
> + if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == -1)
> + return errno;
> + if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, crypto_key, 16) == -1)
> + return errno;
> + opfd = accept(tfmfd, NULL, 0);
> + if (opfd == -1)
> + return errno;
> + return 0;
> +}
> +
> +void deinit_afalg(void)

static

> +{
> + if (tfmfd)
> + close(tfmfd);
> + if (opfd)
> + close(opfd);
> +}
> +
> +void do_crypt_afalg(const void *src, void *dst, int size, bool encrypt)

static

> +{
> + struct msghdr msg = {};
> + struct cmsghdr *cmsg;
> + char cbuf[CMSG_SPACE(4)] = {0};
> + struct iovec iov;
> +
> + msg.msg_control = cbuf;
> + msg.msg_controllen = sizeof(cbuf);
> +
> + cmsg = CMSG_FIRSTHDR(&msg);
> + cmsg->cmsg_level = SOL_ALG;
> + cmsg->cmsg_type = ALG_SET_OP;
> + cmsg->cmsg_len = CMSG_LEN(4);
> + *(__u32 *)CMSG_DATA(cmsg) = encrypt ? ALG_OP_ENCRYPT : ALG_OP_DECRYPT;
> +
> + iov.iov_base = (char *)src;
> + iov.iov_len = size;
> +
> + msg.msg_iov = &iov;
> + msg.msg_iovlen = 1;
> +
> + sendmsg(opfd, &msg, 0);
> + read(opfd, dst, size);
> +}
> +
> +void test_crypto_sanity(void)
> +{
> + LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
> + LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
> + LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
> + LIBBPF_OPTS(bpf_test_run_opts, opts,
> + .repeat = 1,
> + );
> + struct nstoken *nstoken = NULL;
> + struct crypto_sanity *skel;
> + char afalg_plain[16] = {0};
> + char afalg_dst[16] = {0};
> + struct sockaddr_in6 addr;
> + int sockfd, err, pfd;
> + socklen_t addrlen;
> +
> + skel = crypto_sanity__open();
> + if (!ASSERT_OK_PTR(skel, "skel open"))
> + return;
> +
> + bpf_program__set_autoload(skel->progs.crypto_accuire, true);
> +
> + err = crypto_sanity__load(skel);
> + if (!ASSERT_ERR(err, "crypto_accuire unexpected load success"))
> + goto fail;
> +
> + crypto_sanity__destroy(skel);
> +
> + skel = crypto_sanity__open();
> + if (!ASSERT_OK_PTR(skel, "skel open"))
> + return;
> +
> + bpf_program__set_autoload(skel->progs.crypto_accuire, false);

Is it needed?

> +
> + SYS(fail, "ip netns add %s", NS_TEST);
> + SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
> + SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
> +
> + err = crypto_sanity__load(skel);
> + if (!ASSERT_OK(err, "crypto_sanity__load"))
> + goto fail;
> +
> + nstoken = open_netns(NS_TEST);
> + if (!ASSERT_OK_PTR(nstoken, "open_netns"))
> + goto fail;
> +
> + err = init_afalg();
> + if (!ASSERT_OK(err, "AF_ALG init fail"))
> + goto fail;
> +
> + qdisc_hook.ifindex = if_nametoindex("lo");
> + if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
> + goto fail;
> +
> + err = crypto_sanity__attach(skel);
> + if (!ASSERT_OK(err, "crypto_sanity__attach"))
> + goto fail;
> +
> + pfd = bpf_program__fd(skel->progs.crypto_release);
> + if (!ASSERT_GT(pfd, 0, "crypto_release fd"))
> + goto fail;
> +
> + err = bpf_prog_test_run_opts(pfd, &opts);
> + if (!ASSERT_OK(err, "crypto_release") ||
> + !ASSERT_OK(opts.retval, "crypto_release retval"))
> + goto fail;
> +
> + pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
> + if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
> + goto fail;
> +
> + err = bpf_prog_test_run_opts(pfd, &opts);
> + if (!ASSERT_OK(err, "skb_crypto_setup") ||
> + !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
> + goto fail;
> +
> + if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
> + goto fail;
> +
> + err = bpf_tc_hook_create(&qdisc_hook);
> + if (!ASSERT_OK(err, "create qdisc hook"))
> + goto fail;
> +
> + addrlen = sizeof(addr);
> + err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
> + (void *)&addr, &addrlen);
> + if (!ASSERT_OK(err, "make_sockaddr"))
> + goto fail;
> +
> + tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
> + err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
> + if (!ASSERT_OK(err, "attach encrypt filter"))
> + goto fail;
> +
> + sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
> + if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
> + goto fail;
> + err = sendto(sockfd, plain_text, 16, 0, (void *)&addr, addrlen);
> + close(sockfd);
> + if (!ASSERT_EQ(err, 16, "encrypt send"))
> + goto fail;
> +
> + do_crypt_afalg(plain_text, afalg_dst, 16, true);
> +
> + bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
> + if (!ASSERT_OK(skel->bss->status, "encrypt status"))
> + goto fail;
> + if (!ASSERT_STRNEQ(skel->bss->dst, afalg_dst, sizeof(afalg_dst), "encrypt AF_ALG"))

nit. Please replace all numeric value "16" usages in this patch with sizeof() or
a macro.

> + goto fail;
> +
> + tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
> + err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
> + if (!ASSERT_OK(err, "attach decrypt filter"))
> + goto fail;
> +
> + sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
> + if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
> + goto fail;
> + err = sendto(sockfd, afalg_dst, 16, 0, (void *)&addr, addrlen);
> + close(sockfd);
> + if (!ASSERT_EQ(err, 16, "decrypt send"))
> + goto fail;
> +
> + do_crypt_afalg(afalg_dst, afalg_plain, 16, false);
> +
> + bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
> + if (!ASSERT_OK(skel->bss->status, "decrypt status"))
> + goto fail;
> + if (!ASSERT_STRNEQ(skel->bss->dst, afalg_plain, sizeof(afalg_plain), "decrypt AF_ALG"))
> + goto fail;
> +
> +fail:
> + if (nstoken) {
> + bpf_tc_hook_destroy(&qdisc_hook);
> + close_netns(nstoken);
> + }
> + deinit_afalg();
> + SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
> + crypto_sanity__destroy(skel);
> +}

[ ... ]

> diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
> new file mode 100644
> index 000000000000..f566ff189b7e
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
> @@ -0,0 +1,192 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#include "vmlinux.h"
> +#include "bpf_tracing_net.h"
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_endian.h>
> +#include <bpf/bpf_tracing.h>
> +#include "bpf_misc.h"
> +#include "bpf_kfuncs.h"
> +#include "crypto_common.h"
> +
> +#define UDP_TEST_PORT 7777
> +unsigned char crypto_key[16] = "testtest12345678";

nit. Initialize the crypto_key from the prog_tests/crypto_sanity.c through
skel->bss->crypto_key.

> +char dst[32] = {};

Please add comment on why 32 is needed instead of 16.

> +int status;
> +
> +static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
> +{
> + struct ipv6hdr ip6h;
> + struct udphdr udph;
> + u32 offset;
> +
> + if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
> + return -1;
> +
> + if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
> + return -1;
> +
> + if (ip6h.nexthdr != IPPROTO_UDP)
> + return -1;
> +
> + if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
> + return -1;
> +
> + if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
> + return -1;
> +
> + offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
> + if (skb->len < offset + 16)
> + return -1;
> +
> + bpf_dynptr_from_skb(skb, 0, psrc);
> + bpf_dynptr_adjust(psrc, offset, offset + 16);

The bpf_crypto_(de|en)crypt kfunc requires the 16 bytes to be in the linear area
of psrc, so it is better to do a bpf_skb_pull_data() if it is not in the linear
area. People will directly borrow the code from here.
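
A sketch of what that could look like in skb_dynptr_validate(), keeping the
16-byte payload length used by the test (error handling kept minimal):

	/* Make sure the payload bytes are in the linear area before
	 * taking the dynptr over them. */
	if (bpf_skb_pull_data(skb, offset + 16))
		return -1;

	bpf_dynptr_from_skb(skb, 0, psrc);
	bpf_dynptr_adjust(psrc, offset, offset + 16);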

> +
> + return 0;
> +}
> +
> +SEC("fentry.s/bpf_fentry_test1")
> +int BPF_PROG(skb_crypto_setup)
> +{
> + struct bpf_crypto_ctx *cctx;
> + struct bpf_dynptr key = {};
> + int err = 0;
> +
> + status = 0;
> +
> + bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
> + cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
> +
> + if (!cctx) {
> + status = err;
> + return 0;
> + }
> +
> + err = crypto_ctx_insert(cctx);
> + if (err && err != -EEXIST)
> + status = err;
> +
> + return 0;
> +}
> +
> +SEC("fentry.s/bpf_fentry_test1")
> +int BPF_PROG(crypto_release)
> +{
> + struct bpf_crypto_ctx *cctx;
> + struct bpf_dynptr key = {};
> + int err = 0;
> +
> + status = 0;
> +
> + bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
> + cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
> +
> + if (!cctx) {
> + status = err;
> + return 0;
> + }
> +
> + bpf_crypto_ctx_release(cctx);
> +
> + return 0;
> +}
> +
> +SEC("?fentry.s/bpf_fentry_test1")
> +__failure __msg("Unreleased reference")
> +int BPF_PROG(crypto_accuire)

Is it meant to be s/accuire/acquire/ ?

> +{
> + struct bpf_crypto_ctx *cctx;
> + struct bpf_dynptr key = {};
> + int err = 0;
> +
> + status = 0;
> +
> + bpf_dynptr_from_mem(crypto_key, sizeof(crypto_key), 0, &key);
> + cctx = bpf_crypto_ctx_create("skcipher", "ecb(aes)", &key, 0, &err);
> +
> + if (!cctx) {
> + status = err;
> + return 0;
> + }
> +
> + cctx = bpf_crypto_ctx_acquire(cctx);
> + if (!cctx)
> + return -EINVAL;
> +
> + bpf_crypto_ctx_release(cctx);
> +
> + return 0;
> +}
> +
> +SEC("tc")
> +int decrypt_sanity(struct __sk_buff *skb)
> +{
> + struct __crypto_ctx_value *v;
> + struct bpf_crypto_ctx *ctx;
> + struct bpf_dynptr psrc, pdst, iv;
> + int err;
> +
> + err = skb_dynptr_validate(skb, &psrc);
> + if (err < 0) {
> + status = err;
> + return TC_ACT_SHOT;
> + }
> +
> + v = crypto_ctx_value_lookup();
> + if (!v) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + ctx = v->ctx;
> + if (!ctx) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
> + bpf_dynptr_from_mem(dst, 0, 0, &iv);

This is quite non-intuitive as a way to say that no IV is needed. A better user
experience would be to allow passing NULL to the kfunc. This could probably be
improved in the future without breaking the kfunc API. Please add a comment here.
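
A possible wording for that comment (sketch only; accepting a NULL iv is just a
suggested future extension at this point):

	/* ecb(aes) does not use an IV, but the kfunc currently requires
	 * an iv dynptr, so pass a zero-sized one. Allowing a NULL iv is
	 * a possible future extension of the kfunc. */
	bpf_dynptr_from_mem(dst, 0, 0, &iv);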

> +
> + status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
> +
> + return TC_ACT_SHOT;
> +}
> +
> +SEC("tc")
> +int encrypt_sanity(struct __sk_buff *skb)
> +{
> + struct __crypto_ctx_value *v;
> + struct bpf_crypto_ctx *ctx;
> + struct bpf_dynptr psrc, pdst, iv;
> + int err;
> +
> + status = 0;
> +
> + err = skb_dynptr_validate(skb, &psrc);
> + if (err < 0) {
> + status = err;
> + return TC_ACT_SHOT;
> + }
> +
> + v = crypto_ctx_value_lookup();
> + if (!v) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + ctx = v->ctx;
> + if (!ctx) {
> + status = -ENOENT;
> + return TC_ACT_SHOT;
> + }
> +
> + bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
> + bpf_dynptr_from_mem(dst, 0, 0, &iv);
> +
> + status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
> +
> + return TC_ACT_SHOT;
> +}
> +
> +char __license[] SEC("license") = "GPL";


2023-12-05 20:34:55

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

Hi Vadim,

kernel test robot noticed the following build warnings:

[auto build test WARNING on bpf-next/master]

url: https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-crypto-add-skcipher-to-bpf-crypto/20231202-091254
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20231202010604.1877561-1-vadfed%40meta.com
patch subject: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs
config: x86_64-buildonly-randconfig-001-20231202 (https://download.01.org/0day-ci/archive/20231206/[email protected]/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231206/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

>> kernel/bpf/crypto.c:159:6: warning: variable 'ctx' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (!key) {
^~~~
kernel/bpf/crypto.c:192:6: note: uninitialized use occurs here
if (ctx->tfm)
^~~
kernel/bpf/crypto.c:159:2: note: remove the 'if' if its condition is always false
if (!key) {
^~~~~~~~~~~
kernel/bpf/crypto.c:154:6: warning: variable 'ctx' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (!key_len) {
^~~~~~~~
kernel/bpf/crypto.c:192:6: note: uninitialized use occurs here
if (ctx->tfm)
^~~
kernel/bpf/crypto.c:154:2: note: remove the 'if' if its condition is always false
if (!key_len) {
^~~~~~~~~~~~~~~
kernel/bpf/crypto.c:148:6: warning: variable 'ctx' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (authsize && !type->setauthsize) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:192:6: note: uninitialized use occurs here
if (ctx->tfm)
^~~
kernel/bpf/crypto.c:148:2: note: remove the 'if' if its condition is always false
if (authsize && !type->setauthsize) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:143:6: warning: variable 'ctx' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (!authsize && type->setauthsize) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:192:6: note: uninitialized use occurs here
if (ctx->tfm)
^~~
kernel/bpf/crypto.c:143:2: note: remove the 'if' if its condition is always false
if (!authsize && type->setauthsize) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:138:6: warning: variable 'ctx' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
if (!type->has_algo(algo__str)) {
^~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:192:6: note: uninitialized use occurs here
if (ctx->tfm)
^~~
kernel/bpf/crypto.c:138:2: note: remove the 'if' if its condition is always false
if (!type->has_algo(algo__str)) {
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/crypto.c:128:28: note: initialize the variable 'ctx' to silence this warning
struct bpf_crypto_ctx *ctx;
^
= NULL
5 warnings generated.


vim +159 kernel/bpf/crypto.c

105
106 /**
107 * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
108 *
109 * Allocates a crypto context that can be used, acquired, and released by
110 * a BPF program. The crypto context returned by this function must either
111 * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
112 * As crypto API functions use GFP_KERNEL allocations, this function can
113 * only be used in sleepable BPF programs.
114 *
115 * bpf_crypto_ctx_create() allocates memory for crypto context.
116 * It may return NULL if no memory is available.
117 * @type__str: pointer to string representation of crypto type.
118 * @algo__str: pointer to string representation of algorithm.
119 * @pkey: bpf_dynptr which holds cipher key to do crypto.
120 * @err: integer to store error code when NULL is returned
121 */
122 __bpf_kfunc struct bpf_crypto_ctx *
123 bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
124 const struct bpf_dynptr_kern *pkey,
125 unsigned int authsize, int *err)
126 {
127 const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
128 struct bpf_crypto_ctx *ctx;
129 const u8 *key;
130 u32 key_len;
131
132 type = bpf_crypto_get_type(type__str);
133 if (IS_ERR(type)) {
134 *err = PTR_ERR(type);
135 return NULL;
136 }
137
138 if (!type->has_algo(algo__str)) {
139 *err = -EOPNOTSUPP;
140 goto err;
141 }
142
143 if (!authsize && type->setauthsize) {
144 *err = -EOPNOTSUPP;
145 goto err;
146 }
147
148 if (authsize && !type->setauthsize) {
149 *err = -EOPNOTSUPP;
150 goto err;
151 }
152
153 key_len = __bpf_dynptr_size(pkey);
154 if (!key_len) {
155 *err = -EINVAL;
156 goto err;
157 }
158 key = __bpf_dynptr_data(pkey, key_len);
> 159 if (!key) {
160 *err = -EINVAL;
161 goto err;
162 }
163
164 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
165 if (!ctx) {
166 *err = -ENOMEM;
167 goto err;
168 }
169
170 ctx->type = type;
171 ctx->tfm = type->alloc_tfm(algo__str);
172 if (IS_ERR(ctx->tfm)) {
173 *err = PTR_ERR(ctx->tfm);
174 ctx->tfm = NULL;
175 goto err;
176 }
177
178 if (authsize) {
179 *err = type->setauthsize(ctx->tfm, authsize);
180 if (*err)
181 goto err;
182 }
183
184 *err = type->setkey(ctx->tfm, key, key_len);
185 if (*err)
186 goto err;
187
188 refcount_set(&ctx->usage, 1);
189
190 return ctx;
191 err:
192 if (ctx->tfm)
193 type->free_tfm(ctx->tfm);
194 kfree(ctx);
195 module_put(type->owner);
196
197 return NULL;
198 }
199

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-12-05 22:44:59

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

Hi Vadim,

kernel test robot noticed the following build warnings:

[auto build test WARNING on bpf-next/master]

url: https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-crypto-add-skcipher-to-bpf-crypto/20231202-091254
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20231202010604.1877561-1-vadfed%40meta.com
patch subject: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs
config: m68k-randconfig-r081-20231202 (https://download.01.org/0day-ci/archive/20231206/[email protected]/config)
compiler: m68k-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231206/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

>> kernel/bpf/crypto.c:126: warning: Function parameter or member 'authsize' not described in 'bpf_crypto_ctx_create'


vim +126 kernel/bpf/crypto.c

105
106 /**
107 * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
108 *
109 * Allocates a crypto context that can be used, acquired, and released by
110 * a BPF program. The crypto context returned by this function must either
111 * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
112 * As crypto API functions use GFP_KERNEL allocations, this function can
113 * only be used in sleepable BPF programs.
114 *
115 * bpf_crypto_ctx_create() allocates memory for crypto context.
116 * It may return NULL if no memory is available.
117 * @type__str: pointer to string representation of crypto type.
118 * @algo__str: pointer to string representation of algorithm.
119 * @pkey: bpf_dynptr which holds cipher key to do crypto.
120 * @err: integer to store error code when NULL is returned
121 */
122 __bpf_kfunc struct bpf_crypto_ctx *
123 bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
124 const struct bpf_dynptr_kern *pkey,
125 unsigned int authsize, int *err)
> 126 {
127 const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
128 struct bpf_crypto_ctx *ctx;
129 const u8 *key;
130 u32 key_len;
131
132 type = bpf_crypto_get_type(type__str);
133 if (IS_ERR(type)) {
134 *err = PTR_ERR(type);
135 return NULL;
136 }
137
138 if (!type->has_algo(algo__str)) {
139 *err = -EOPNOTSUPP;
140 goto err;
141 }
142
143 if (!authsize && type->setauthsize) {
144 *err = -EOPNOTSUPP;
145 goto err;
146 }
147
148 if (authsize && !type->setauthsize) {
149 *err = -EOPNOTSUPP;
150 goto err;
151 }
152
153 key_len = __bpf_dynptr_size(pkey);
154 if (!key_len) {
155 *err = -EINVAL;
156 goto err;
157 }
158 key = __bpf_dynptr_data(pkey, key_len);
159 if (!key) {
160 *err = -EINVAL;
161 goto err;
162 }
163
164 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
165 if (!ctx) {
166 *err = -ENOMEM;
167 goto err;
168 }
169
170 ctx->type = type;
171 ctx->tfm = type->alloc_tfm(algo__str);
172 if (IS_ERR(ctx->tfm)) {
173 *err = PTR_ERR(ctx->tfm);
174 ctx->tfm = NULL;
175 goto err;
176 }
177
178 if (authsize) {
179 *err = type->setauthsize(ctx->tfm, authsize);
180 if (*err)
181 goto err;
182 }
183
184 *err = type->setkey(ctx->tfm, key, key_len);
185 if (*err)
186 goto err;
187
188 refcount_set(&ctx->usage, 1);
189
190 return ctx;
191 err:
192 if (ctx->tfm)
193 type->free_tfm(ctx->tfm);
194 kfree(ctx);
195 module_put(type->owner);
196
197 return NULL;
198 }
199

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-12-06 06:35:10

by Dan Carpenter

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

Hi Vadim,

kernel test robot noticed the following build warnings:

url: https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-crypto-add-skcipher-to-bpf-crypto/20231202-091254
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20231202010604.1877561-1-vadfed%40meta.com
patch subject: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs
config: x86_64-randconfig-161-20231202 (https://download.01.org/0day-ci/archive/20231206/[email protected]/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce: (https://download.01.org/0day-ci/archive/20231206/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Reported-by: Dan Carpenter <[email protected]>
| Closes: https://lore.kernel.org/r/[email protected]/

smatch warnings:
kernel/bpf/crypto.c:192 bpf_crypto_ctx_create() error: we previously assumed 'ctx' could be null (see line 165)
kernel/bpf/crypto.c:192 bpf_crypto_ctx_create() error: potentially dereferencing uninitialized 'ctx'.

vim +/ctx +192 kernel/bpf/crypto.c

0c47cb96ac404e Vadim Fedorenko 2023-12-01 122 __bpf_kfunc struct bpf_crypto_ctx *
0c47cb96ac404e Vadim Fedorenko 2023-12-01 123 bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
0c47cb96ac404e Vadim Fedorenko 2023-12-01 124 const struct bpf_dynptr_kern *pkey,
0c47cb96ac404e Vadim Fedorenko 2023-12-01 125 unsigned int authsize, int *err)
0c47cb96ac404e Vadim Fedorenko 2023-12-01 126 {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 127 const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);

Delete this assignment. (Duplicated).

0c47cb96ac404e Vadim Fedorenko 2023-12-01 128 struct bpf_crypto_ctx *ctx;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 129 const u8 *key;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 130 u32 key_len;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 131
0c47cb96ac404e Vadim Fedorenko 2023-12-01 132 type = bpf_crypto_get_type(type__str);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 133 if (IS_ERR(type)) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 134 *err = PTR_ERR(type);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 135 return NULL;

Why doesn't this function just return error pointers?

0c47cb96ac404e Vadim Fedorenko 2023-12-01 136 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 137
0c47cb96ac404e Vadim Fedorenko 2023-12-01 138 if (!type->has_algo(algo__str)) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 139 *err = -EOPNOTSUPP;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 140 goto err;

ctx is uninitialized on this path.

0c47cb96ac404e Vadim Fedorenko 2023-12-01 141 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 142
0c47cb96ac404e Vadim Fedorenko 2023-12-01 143 if (!authsize && type->setauthsize) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 144 *err = -EOPNOTSUPP;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 145 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 146 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 147
0c47cb96ac404e Vadim Fedorenko 2023-12-01 148 if (authsize && !type->setauthsize) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 149 *err = -EOPNOTSUPP;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 150 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 151 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 152
0c47cb96ac404e Vadim Fedorenko 2023-12-01 153 key_len = __bpf_dynptr_size(pkey);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 154 if (!key_len) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 155 *err = -EINVAL;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 156 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 157 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 158 key = __bpf_dynptr_data(pkey, key_len);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 159 if (!key) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 160 *err = -EINVAL;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 161 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 162 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 163
0c47cb96ac404e Vadim Fedorenko 2023-12-01 164 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 @165 if (!ctx) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 166 *err = -ENOMEM;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 167 goto err;

ctx is NULL here.

0c47cb96ac404e Vadim Fedorenko 2023-12-01 168 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 169
0c47cb96ac404e Vadim Fedorenko 2023-12-01 170 ctx->type = type;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 171 ctx->tfm = type->alloc_tfm(algo__str);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 172 if (IS_ERR(ctx->tfm)) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 173 *err = PTR_ERR(ctx->tfm);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 174 ctx->tfm = NULL;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 175 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 176 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 177
0c47cb96ac404e Vadim Fedorenko 2023-12-01 178 if (authsize) {
0c47cb96ac404e Vadim Fedorenko 2023-12-01 179 *err = type->setauthsize(ctx->tfm, authsize);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 180 if (*err)
0c47cb96ac404e Vadim Fedorenko 2023-12-01 181 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 182 }
0c47cb96ac404e Vadim Fedorenko 2023-12-01 183
0c47cb96ac404e Vadim Fedorenko 2023-12-01 184 *err = type->setkey(ctx->tfm, key, key_len);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 185 if (*err)
0c47cb96ac404e Vadim Fedorenko 2023-12-01 186 goto err;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 187
0c47cb96ac404e Vadim Fedorenko 2023-12-01 188 refcount_set(&ctx->usage, 1);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 189
0c47cb96ac404e Vadim Fedorenko 2023-12-01 190 return ctx;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 191 err:
0c47cb96ac404e Vadim Fedorenko 2023-12-01 @192 if (ctx->tfm)
^^^^^^^^
NULL dereference. These two error handling bugs in three lines of code
are canonical One Err Label type bugs. Better to do a ladder where each
error label frees the last thing that was allocated. Easier to review.
Then you could delete the "ctx->tfm = NULL;" assignment on line 174.

return ctx;

err_free_tfm:
type->free_tfm(ctx->tfm);
err_free_ctx:
kfree(ctx);
err_module_put:
module_put(type->owner);

return NULL;

I have written about this at length on my blog:
https://staticthinking.wordpress.com/2022/04/28/free-the-last-thing-style/

0c47cb96ac404e Vadim Fedorenko 2023-12-01 193 type->free_tfm(ctx->tfm);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 194 kfree(ctx);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 195 module_put(type->owner);
0c47cb96ac404e Vadim Fedorenko 2023-12-01 196
0c47cb96ac404e Vadim Fedorenko 2023-12-01 197 return NULL;
0c47cb96ac404e Vadim Fedorenko 2023-12-01 198 }

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
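
A sketch of the tail of bpf_crypto_ctx_create() with that ladder applied (not
the posted code; label names follow the suggestion above, and the earlier
has_algo()/authsize/key checks would jump to err_module_put):

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		*err = -ENOMEM;
		goto err_module_put;
	}

	ctx->type = type;
	ctx->tfm = type->alloc_tfm(algo__str);
	if (IS_ERR(ctx->tfm)) {
		*err = PTR_ERR(ctx->tfm);
		goto err_free_ctx;
	}

	if (authsize) {
		*err = type->setauthsize(ctx->tfm, authsize);
		if (*err)
			goto err_free_tfm;
	}

	*err = type->setkey(ctx->tfm, key, key_len);
	if (*err)
		goto err_free_tfm;

	refcount_set(&ctx->usage, 1);
	return ctx;

err_free_tfm:
	type->free_tfm(ctx->tfm);
err_free_ctx:
	kfree(ctx);
err_module_put:
	module_put(type->owner);
	return NULL;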


2023-12-07 12:35:21

by Vadim Fedorenko

[permalink] [raw]
Subject: Re: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs

On 05/12/2023 21:56, Dan Carpenter wrote:
> Hi Vadim,
>
> kernel test robot noticed the following build warnings:
>
> url: https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-crypto-add-skcipher-to-bpf-crypto/20231202-091254
> base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
> patch link: https://lore.kernel.org/r/20231202010604.1877561-1-vadfed%40meta.com
> patch subject: [PATCH bpf-next v7 1/3] bpf: make common crypto API for TC/XDP programs
> config: x86_64-randconfig-161-20231202 (https://download.01.org/0day-ci/archive/20231206/[email protected]/config)
> compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
> reproduce: (https://download.01.org/0day-ci/archive/20231206/[email protected]/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <[email protected]>
> | Reported-by: Dan Carpenter <[email protected]>
> | Closes: https://lore.kernel.org/r/[email protected]/
>
> smatch warnings:
> kernel/bpf/crypto.c:192 bpf_crypto_ctx_create() error: we previously assumed 'ctx' could be null (see line 165)
> kernel/bpf/crypto.c:192 bpf_crypto_ctx_create() error: potentially dereferencing uninitialized 'ctx'.
>
> vim +/ctx +192 kernel/bpf/crypto.c
>
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 122 __bpf_kfunc struct bpf_crypto_ctx *
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 123 bpf_crypto_ctx_create(const char *type__str, const char *algo__str,
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 124 const struct bpf_dynptr_kern *pkey,
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 125 unsigned int authsize, int *err)
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 126 {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 127 const struct bpf_crypto_type *type = bpf_crypto_get_type(type__str);
>
> Delete this assignment. (Duplicated).
>

Ah, yeah, will remove it.

> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 128 struct bpf_crypto_ctx *ctx;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 129 const u8 *key;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 130 u32 key_len;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 131
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 132 type = bpf_crypto_get_type(type__str);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 133 if (IS_ERR(type)) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 134 *err = PTR_ERR(type);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 135 return NULL;
>
> Why doesn't this function just return error pointers?

BPF kfuncs cannot return error pointers; it makes the BPF verifier very unhappy.

>
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 136 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 137
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 138 if (!type->has_algo(algo__str)) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 139 *err = -EOPNOTSUPP;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 140 goto err;
>
> ctx is uninitialized on this path.
>
Yep, it was already highlighted in the feedback, thanks.

> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 141 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 142
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 143 if (!authsize && type->setauthsize) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 144 *err = -EOPNOTSUPP;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 145 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 146 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 147
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 148 if (authsize && !type->setauthsize) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 149 *err = -EOPNOTSUPP;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 150 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 151 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 152
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 153 key_len = __bpf_dynptr_size(pkey);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 154 if (!key_len) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 155 *err = -EINVAL;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 156 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 157 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 158 key = __bpf_dynptr_data(pkey, key_len);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 159 if (!key) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 160 *err = -EINVAL;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 161 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 162 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 163
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 164 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 @165 if (!ctx) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 166 *err = -ENOMEM;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 167 goto err;
>
> ctx is NULL here.
>
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 168 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 169
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 170 ctx->type = type;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 171 ctx->tfm = type->alloc_tfm(algo__str);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 172 if (IS_ERR(ctx->tfm)) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 173 *err = PTR_ERR(ctx->tfm);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 174 ctx->tfm = NULL;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 175 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 176 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 177
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 178 if (authsize) {
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 179 *err = type->setauthsize(ctx->tfm, authsize);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 180 if (*err)
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 181 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 182 }
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 183
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 184 *err = type->setkey(ctx->tfm, key, key_len);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 185 if (*err)
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 186 goto err;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 187
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 188 refcount_set(&ctx->usage, 1);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 189
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 190 return ctx;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 191 err:
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 @192 if (ctx->tfm)
> ^^^^^^^^
> NULL dereference. These two error handling bugs in three lines of code
> are canonical One Err Label type bugs. Better to do a ladder where each
> error label frees the last thing that was allocated. Easier to review.
> Then you could delete the "ctx->tfm = NULL;" assignment on line 174.
>
> return ctx;
>
> err_free_tfm:
> type->free_tfm(ctx->tfm);
> err_free_ctx:
> kfree(ctx);
> err_module_put:
> module_put(type->owner);
>
> return NULL;
>
> I have written about this at length on my blog:
> https://staticthinking.wordpress.com/2022/04/28/free-the-last-thing-style/

Thanks, very good blog post. I'll follow this approach in the next version.

> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 193 type->free_tfm(ctx->tfm);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 194 kfree(ctx);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 195 module_put(type->owner);
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 196
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 197 return NULL;
> 0c47cb96ac404e Vadim Fedorenko 2023-12-01 198 }
>