2023-03-05 14:31:47

by Xu Kuohai

Subject: [PATCH bpf-next 0/2] bpf: add bound tracking for BPF_MOD

From: Xu Kuohai <[email protected]>

dst_reg is marked as unknown when a BPF_MOD instruction is verified, causing
the following bpf prog to be incorrectly rejected.

0: r0 = 0
1: r0 %= 10 // r0 is marked as unknown
2: r1 = 0
3: r1 += 1
4: if r1 < r0 goto pc-2 // verifier concludes the loop is unbounded
5: exit

To teach the verifier to accept the above prog, this series adds bound
tracking for BPF_MOD.

Xu Kuohai (2):
bpf: add bound tracking for BPF_MOD
selftests/bpf: check if verifier tracks dst_reg bound for BPF_MOD

kernel/bpf/verifier.c | 98 +++++++-
tools/testing/selftests/bpf/verifier/mod.c | 247 +++++++++++++++++++++
2 files changed, 340 insertions(+), 5 deletions(-)
create mode 100644 tools/testing/selftests/bpf/verifier/mod.c

--
2.30.2



2023-03-05 14:31:53

by Xu Kuohai

Subject: [PATCH bpf-next 1/2] bpf: add bound tracking for BPF_MOD

From: Xu Kuohai <[email protected]>

dst_reg is marked as unknown when a BPF_MOD instruction is verified, causing
the following bpf prog to be incorrectly rejected.

0: r0 = 0
1: r0 %= 10 // r0 is marked as unknown
2: r1 = 0
3: r1 += 1
4: if r1 < r0 goto pc-2 // verifier concludes the loop is unbounded
5: exit

To teach the verifier to accept the above prog, this patch adds bound
tracking for BPF_MOD, based on the following observation (see the sketch
below).

BPF_MOD is unsigned, and for a given unsigned divisor x:
1. when x != 0, the result in dst_reg is in the range [0, x);
2. when x == 0, dst_reg is truncated to 32 bits by mod32, or left unchanged
by mod64.
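
For illustration, a minimal userspace sketch (not part of the patch) of the
same remainder-range reasoning; mod_bounds() is a made-up helper that mirrors
the "no winding" check used in scalar_min_max_mod() below:

/* When the dividend range [umin, umax] does not wrap around a multiple of
 * val, the remainder range [umin % val, umax % val] is exact; otherwise only
 * [0, val - 1] is a safe bound.
 */
#include <stdio.h>

static void mod_bounds(unsigned long long umin, unsigned long long umax,
		       unsigned long long val)
{
	unsigned long long umin_rem = umin % val;
	unsigned long long umax_rem = umax % val;

	if (umax - umin < val && umin_rem <= umax_rem)
		printf("[%llu, %llu] %% %llu -> [%llu, %llu]\n",
		       umin, umax, val, umin_rem, umax_rem);
	else
		printf("[%llu, %llu] %% %llu -> [0, %llu]\n",
		       umin, umax, val, val - 1);
}

int main(void)
{
	mod_bounds(16, 18, 5);	/* no wrap: remainders stay in [1, 3] */
	mod_bounds(8, 12, 5);	/* wraps: remainders are 3, 4, 0, 1, 2 */
	return 0;
}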

Signed-off-by: Xu Kuohai <[email protected]>
---
kernel/bpf/verifier.c | 98 ++++++++++++++++++++++++++++++++++++++++---
1 file changed, 93 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 272563a0b770..d44a33a53e8e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11257,6 +11257,87 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
__update_reg_bounds(dst_reg);
}

+static void scalar32_min_max_mod(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u32 val = (u32)src_reg->var_off.value; /* src_reg is const */
+ u32 umax = dst_reg->u32_max_value;
+ u32 umin = dst_reg->u32_min_value;
+ u32 umax_rem, umin_rem;
+
+ /* dst_reg is 32-bit truncated when mod32 zero, since
+ * adjust_scalar_min_max_vals calls zext_32_to_64 to do truncation for
+ * all alu32 ops, here we do nothing and just return.
+ */
+ if (!val)
+ return;
+
+ umax_rem = do_div(umax, val);
+ umin_rem = do_div(umin, val);
+
+ /* no winding */
+ if (umax - umin < val && umin_rem <= umax_rem) {
+ dst_reg->var_off = tnum_range(umin_rem, umax_rem);
+ dst_reg->u32_min_value = umin_rem;
+ dst_reg->u32_max_value = umax_rem;
+ } else {
+ dst_reg->var_off = tnum_range(0, val - 1);
+ dst_reg->u32_min_value = 0;
+ dst_reg->u32_max_value = val - 1;
+ }
+
+ /* cross the sign boundary */
+ if ((s32)dst_reg->u32_min_value > (s32)dst_reg->u32_max_value) {
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ dst_reg->s32_min_value = (s32)dst_reg->u32_min_value;
+ dst_reg->s32_max_value = (s32)dst_reg->u32_max_value;
+ }
+
+ /* mark reg64 unbounded to deduce 64-bit bounds from var_off */
+ __mark_reg64_unbounded(dst_reg);
+}
+
+static void scalar_min_max_mod(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u64 val = src_reg->var_off.value; /* src_reg is const */
+ u64 umax = dst_reg->umax_value;
+ u64 umin = dst_reg->umin_value;
+ u64 umax_rem, umin_rem;
+
+ /* dst_reg is untouched when mod64 zero */
+ if (!val)
+ return;
+
+ div64_u64_rem(umin, val, &umin_rem);
+ div64_u64_rem(umax, val, &umax_rem);
+
+ /* no winding */
+ if (umax - umin < val && umin_rem <= umax_rem) {
+ dst_reg->var_off = tnum_range(umin_rem, umax_rem);
+ dst_reg->umin_value = umin_rem;
+ dst_reg->umax_value = umax_rem;
+ } else {
+ dst_reg->var_off = tnum_range(0, val - 1);
+ dst_reg->umin_value = 0;
+ dst_reg->umax_value = val - 1;
+ }
+
+ /* cross the sign boundary */
+ if ((s64)dst_reg->umin_value > (s64)dst_reg->umax_value) {
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ dst_reg->smin_value = (s64)dst_reg->umin_value;
+ dst_reg->smax_value = (s64)dst_reg->umax_value;
+ }
+
+ /* mark reg32 unbounded to deduce 32-bit bounds from var_off */
+ __mark_reg32_unbounded(dst_reg);
+}
+
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
@@ -11331,11 +11412,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
* and BPF_OR. This is possible because these ops have fairly easy to
* understand and calculate behavior in both 32-bit and 64-bit alu ops.
* See alu32 verifier tests for examples. The second class of
- * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
- * with regards to tracking sign/unsigned bounds because the bits may
- * cross subreg boundaries in the alu64 case. When this happens we mark
- * the reg unbounded in the subreg bound space and use the resulting
- * tnum to calculate an approximation of the sign/unsigned bounds.
+ * operations, BPF_LSH, BPF_RSH, BPF_ARSH and BPF_MOD, however are not
+ * so easy with regards to tracking sign/unsigned bounds because the
+ * bits may cross subreg boundaries in the alu64 case. When this happens
+ * we mark the reg unbounded in the subreg bound space and use the
+ * resulting tnum to calculate an approximation of the sign/unsigned
+ * bounds.
*/
switch (opcode) {
case BPF_ADD:
@@ -11407,6 +11489,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
else
scalar_min_max_arsh(dst_reg, &src_reg);
break;
+ case BPF_MOD:
+ if (alu32)
+ scalar32_min_max_mod(dst_reg, &src_reg);
+ else
+ scalar_min_max_mod(dst_reg, &src_reg);
+ break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
--
2.30.2


2023-03-05 14:31:57

by Xu Kuohai

Subject: [PATCH bpf-next 2/2] selftests/bpf: check if verifier tracks dst_reg bound for BPF_MOD

From: Xu Kuohai <[email protected]>

Add test cases to check whether the verifier tracks dst_reg bounds for
BPF_MOD.

Signed-off-by: Xu Kuohai <[email protected]>
---
tools/testing/selftests/bpf/verifier/mod.c | 247 +++++++++++++++++++++
1 file changed, 247 insertions(+)
create mode 100644 tools/testing/selftests/bpf/verifier/mod.c

diff --git a/tools/testing/selftests/bpf/verifier/mod.c b/tools/testing/selftests/bpf/verifier/mod.c
new file mode 100644
index 000000000000..717e4cd08e59
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/mod.c
@@ -0,0 +1,247 @@
+{
+ "mod64 positive imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_0, 1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod64 positive reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod64 zero",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod64 negative 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
+{
+ "mod64 negative 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, -4),
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 2,
+},
+{
+ "mod64 negative 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, -4),
+ BPF_MOV32_IMM(BPF_REG_1, -5),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
+{
+ "mod64 variable dividend cross signed boundary",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 11),
+
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x80000000000000ff),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_1, BPF_REG_0),
+
+ BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2),
+
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF program is too large.",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod32 positive imm",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod32 positive reg",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod32 zero",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "mod32 negative 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
+{
+ "mod32 negative 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, -4),
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 2,
+},
+{
+ "mod32 negative 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, -4),
+ BPF_MOV32_IMM(BPF_REG_1, -5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_0, -2),
+
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
+{
+ "mod32 variable dividend cross signed boundary",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 7),
+
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0x7fffff10),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 0x800000ff),
+
+ BPF_MOV32_IMM(BPF_REG_0, 0x80000000),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2),
+
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF program is too large.",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
--
2.30.2


2023-03-05 16:49:44

by kernel test robot

Subject: Re: [PATCH bpf-next 1/2] bpf: add bound tracking for BPF_MOD

Hi Xu,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on bpf-next/master]

url: https://github.com/intel-lab-lkp/linux/commits/Xu-Kuohai/bpf-add-bound-tracking-for-BPF_MOD/20230305-223257
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20230306033119.2634976-2-xukuohai%40huaweicloud.com
patch subject: [PATCH bpf-next 1/2] bpf: add bound tracking for BPF_MOD
config: arm-randconfig-r025-20230305 (https://download.01.org/0day-ci/archive/20230306/[email protected]/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project 67409911353323ca5edf2049ef0df54132fa1ca7)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm cross compiling tool for clang build
# apt-get install binutils-arm-linux-gnueabi
# https://github.com/intel-lab-lkp/linux/commit/e66c7bbd32e375af92c776a2b9f51be4c515ad71
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Xu-Kuohai/bpf-add-bound-tracking-for-BPF_MOD/20230305-223257
git checkout e66c7bbd32e375af92c776a2b9f51be4c515ad71
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm SHELL=/bin/bash kernel/bpf/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>
| Link: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

kernel/bpf/verifier.c:10298:24: warning: array index 16 is past the end of the array (that has type 'u32[16]' (aka 'unsigned int[16]')) [-Warray-bounds]
meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
^ ~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/verifier.c:9150:1: note: array 'special_kfunc_list' declared here
BTF_ID_LIST(special_kfunc_list)
^
include/linux/btf_ids.h:207:27: note: expanded from macro 'BTF_ID_LIST'
#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
^
>> kernel/bpf/verifier.c:11622:13: warning: comparison of distinct pointer types ('typeof ((umax)) *' (aka 'unsigned int *') and 'uint64_t *' (aka 'unsigned long long *')) [-Wcompare-distinct-pointer-types]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:222:28: note: expanded from macro 'do_div'
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~
kernel/bpf/verifier.c:11622:13: error: incompatible pointer types passing 'u32 *' (aka 'unsigned int *') to parameter of type 'uint64_t *' (aka 'unsigned long long *') [-Werror,-Wincompatible-pointer-types]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:238:22: note: expanded from macro 'do_div'
__rem = __div64_32(&(n), __base); \
^~~~
arch/arm/include/asm/div64.h:24:45: note: passing argument to parameter 'n' here
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
^
>> kernel/bpf/verifier.c:11623:13: warning: comparison of distinct pointer types ('typeof ((umin)) *' (aka 'unsigned int *') and 'uint64_t *' (aka 'unsigned long long *')) [-Wcompare-distinct-pointer-types]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:222:28: note: expanded from macro 'do_div'
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~
kernel/bpf/verifier.c:11623:13: error: incompatible pointer types passing 'u32 *' (aka 'unsigned int *') to parameter of type 'uint64_t *' (aka 'unsigned long long *') [-Werror,-Wincompatible-pointer-types]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:238:22: note: expanded from macro 'do_div'
__rem = __div64_32(&(n), __base); \
^~~~
arch/arm/include/asm/div64.h:24:45: note: passing argument to parameter 'n' here
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
^
>> kernel/bpf/verifier.c:11622:13: warning: shift count >= width of type [-Wshift-count-overflow]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:234:25: note: expanded from macro 'do_div'
} else if (likely(((n) >> 32) == 0)) { \
^ ~~
include/linux/compiler.h:77:40: note: expanded from macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
kernel/bpf/verifier.c:11623:13: warning: shift count >= width of type [-Wshift-count-overflow]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:234:25: note: expanded from macro 'do_div'
} else if (likely(((n) >> 32) == 0)) { \
^ ~~
include/linux/compiler.h:77:40: note: expanded from macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
5 warnings and 2 errors generated.


vim +11622 kernel/bpf/verifier.c

11606
11607 static void scalar32_min_max_mod(struct bpf_reg_state *dst_reg,
11608 struct bpf_reg_state *src_reg)
11609 {
11610 u32 val = (u32)src_reg->var_off.value; /* src_reg is const */
11611 u32 umax = dst_reg->u32_max_value;
11612 u32 umin = dst_reg->u32_min_value;
11613 u32 umax_rem, umin_rem;
11614
11615 /* dst_reg is 32-bit truncated when mod32 zero, since
11616 * adjust_scalar_min_max_vals calls zext_32_to_64 to do truncation for
11617 * all alu32 ops, here we do nothing and just return.
11618 */
11619 if (!val)
11620 return;
11621
11622 umax_rem = do_div(umax, val);
11623 umin_rem = do_div(umin, val);
11624
11625 /* no winding */
11626 if (umax - umin < val && umin_rem <= umax_rem) {
11627 dst_reg->var_off = tnum_range(umin_rem, umax_rem);
11628 dst_reg->u32_min_value = umin_rem;
11629 dst_reg->u32_max_value = umax_rem;
11630 } else {
11631 dst_reg->var_off = tnum_range(0, val - 1);
11632 dst_reg->u32_min_value = 0;
11633 dst_reg->u32_max_value = val - 1;
11634 }
11635
11636 /* cross the sign boundary */
11637 if ((s32)dst_reg->u32_min_value > (s32)dst_reg->u32_max_value) {
11638 dst_reg->s32_min_value = S32_MIN;
11639 dst_reg->s32_max_value = S32_MAX;
11640 } else {
11641 dst_reg->s32_min_value = (s32)dst_reg->u32_min_value;
11642 dst_reg->s32_max_value = (s32)dst_reg->u32_max_value;
11643 }
11644
11645 /* mark reg64 unbounded to deduce 64-bit bounds from var_off */
11646 __mark_reg64_unbounded(dst_reg);
11647 }
11648

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests

2023-03-05 18:00:49

by kernel test robot

Subject: Re: [PATCH bpf-next 1/2] bpf: add bound tracking for BPF_MOD

Hi Xu,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on bpf-next/master]

url: https://github.com/intel-lab-lkp/linux/commits/Xu-Kuohai/bpf-add-bound-tracking-for-BPF_MOD/20230305-223257
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20230306033119.2634976-2-xukuohai%40huaweicloud.com
patch subject: [PATCH bpf-next 1/2] bpf: add bound tracking for BPF_MOD
config: arm-randconfig-r025-20230305 (https://download.01.org/0day-ci/archive/20230306/[email protected]/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project 67409911353323ca5edf2049ef0df54132fa1ca7)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm cross compiling tool for clang build
# apt-get install binutils-arm-linux-gnueabi
# https://github.com/intel-lab-lkp/linux/commit/e66c7bbd32e375af92c776a2b9f51be4c515ad71
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Xu-Kuohai/bpf-add-bound-tracking-for-BPF_MOD/20230305-223257
git checkout e66c7bbd32e375af92c776a2b9f51be4c515ad71
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm SHELL=/bin/bash kernel/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>
| Link: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All errors (new ones prefixed by >>):

kernel/bpf/verifier.c:10298:24: warning: array index 16 is past the end of the array (that has type 'u32[16]' (aka 'unsigned int[16]')) [-Warray-bounds]
meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
^ ~~~~~~~~~~~~~~~~~~~~~~~~
kernel/bpf/verifier.c:9150:1: note: array 'special_kfunc_list' declared here
BTF_ID_LIST(special_kfunc_list)
^
include/linux/btf_ids.h:207:27: note: expanded from macro 'BTF_ID_LIST'
#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
^
kernel/bpf/verifier.c:11622:13: warning: comparison of distinct pointer types ('typeof ((umax)) *' (aka 'unsigned int *') and 'uint64_t *' (aka 'unsigned long long *')) [-Wcompare-distinct-pointer-types]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:222:28: note: expanded from macro 'do_div'
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~
>> kernel/bpf/verifier.c:11622:13: error: incompatible pointer types passing 'u32 *' (aka 'unsigned int *') to parameter of type 'uint64_t *' (aka 'unsigned long long *') [-Werror,-Wincompatible-pointer-types]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:238:22: note: expanded from macro 'do_div'
__rem = __div64_32(&(n), __base); \
^~~~
arch/arm/include/asm/div64.h:24:45: note: passing argument to parameter 'n' here
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
^
kernel/bpf/verifier.c:11623:13: warning: comparison of distinct pointer types ('typeof ((umin)) *' (aka 'unsigned int *') and 'uint64_t *' (aka 'unsigned long long *')) [-Wcompare-distinct-pointer-types]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:222:28: note: expanded from macro 'do_div'
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~
kernel/bpf/verifier.c:11623:13: error: incompatible pointer types passing 'u32 *' (aka 'unsigned int *') to parameter of type 'uint64_t *' (aka 'unsigned long long *') [-Werror,-Wincompatible-pointer-types]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:238:22: note: expanded from macro 'do_div'
__rem = __div64_32(&(n), __base); \
^~~~
arch/arm/include/asm/div64.h:24:45: note: passing argument to parameter 'n' here
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
^
kernel/bpf/verifier.c:11622:13: warning: shift count >= width of type [-Wshift-count-overflow]
umax_rem = do_div(umax, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:234:25: note: expanded from macro 'do_div'
} else if (likely(((n) >> 32) == 0)) { \
^ ~~
include/linux/compiler.h:77:40: note: expanded from macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
kernel/bpf/verifier.c:11623:13: warning: shift count >= width of type [-Wshift-count-overflow]
umin_rem = do_div(umin, val);
^~~~~~~~~~~~~~~~~
include/asm-generic/div64.h:234:25: note: expanded from macro 'do_div'
} else if (likely(((n) >> 32) == 0)) { \
^ ~~
include/linux/compiler.h:77:40: note: expanded from macro 'likely'
# define likely(x) __builtin_expect(!!(x), 1)
^
5 warnings and 2 errors generated.


vim +11622 kernel/bpf/verifier.c

11606
11607 static void scalar32_min_max_mod(struct bpf_reg_state *dst_reg,
11608 struct bpf_reg_state *src_reg)
11609 {
11610 u32 val = (u32)src_reg->var_off.value; /* src_reg is const */
11611 u32 umax = dst_reg->u32_max_value;
11612 u32 umin = dst_reg->u32_min_value;
11613 u32 umax_rem, umin_rem;
11614
11615 /* dst_reg is 32-bit truncated when mod32 zero, since
11616 * adjust_scalar_min_max_vals calls zext_32_to_64 to do truncation for
11617 * all alu32 ops, here we do nothing and just return.
11618 */
11619 if (!val)
11620 return;
11621
11622 umax_rem = do_div(umax, val);
11623 umin_rem = do_div(umin, val);
11624
11625 /* no winding */
11626 if (umax - umin < val && umin_rem <= umax_rem) {
11627 dst_reg->var_off = tnum_range(umin_rem, umax_rem);
11628 dst_reg->u32_min_value = umin_rem;
11629 dst_reg->u32_max_value = umax_rem;
11630 } else {
11631 dst_reg->var_off = tnum_range(0, val - 1);
11632 dst_reg->u32_min_value = 0;
11633 dst_reg->u32_max_value = val - 1;
11634 }
11635
11636 /* cross the sign boundary */
11637 if ((s32)dst_reg->u32_min_value > (s32)dst_reg->u32_max_value) {
11638 dst_reg->s32_min_value = S32_MIN;
11639 dst_reg->s32_max_value = S32_MAX;
11640 } else {
11641 dst_reg->s32_min_value = (s32)dst_reg->u32_min_value;
11642 dst_reg->s32_max_value = (s32)dst_reg->u32_max_value;
11643 }
11644
11645 /* mark reg64 unbounded to deduce 64-bit bounds from var_off */
11646 __mark_reg64_unbounded(dst_reg);
11647 }
11648

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests