2022-12-11 22:23:50

by David Keisar Schm

[permalink] [raw]
Subject: [PATCH 2/5] Replace invocation of weak PRNG in kernel/bpf/core.c

From: David <[email protected]>

We changed the invocation of
prandom_u32_state to get_random_u32.
We deleted the maintained state,
which was a per-CPU variable,
since get_random_u32 maintains its own per-CPU state.
We also deleted the state initializer,
since it is no longer needed.

Signed-off-by: David <[email protected]>
---
include/linux/bpf.h | 1 -
kernel/bpf/core.c | 13 +------------
kernel/bpf/verifier.c | 2 --
net/core/filter.c | 1 -
4 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c1bd1bd10..0689520b9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2572,7 +2572,6 @@ const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
-void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4cb5421d9..a6f06894e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2579,13 +2579,6 @@ void bpf_prog_free(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

-/* RNG for unpriviledged user space with separated state from prandom_u32(). */
-static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
-
-void bpf_user_rnd_init_once(void)
-{
- prandom_init_once(&bpf_user_rnd_state);
-}

BPF_CALL_0(bpf_user_rnd_u32)
{
@@ -2595,12 +2588,8 @@ BPF_CALL_0(bpf_user_rnd_u32)
* transformations. Register assignments from both sides are
* different, f.e. classic always sets fn(ctx, A, X) here.
*/
- struct rnd_state *state;
u32 res;
-
- state = &get_cpu_var(bpf_user_rnd_state);
- res = predictable_rng_prandom_u32_state(state);
- put_cpu_var(bpf_user_rnd_state);
+ res = get_random_u32();

return res;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 264b3dc71..9f22fb3fa 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14049,8 +14049,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env)

if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
- if (insn->imm == BPF_FUNC_get_prandom_u32)
- bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
diff --git a/net/core/filter.c b/net/core/filter.c
index bb0136e7a..7a595ac00 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -443,7 +443,6 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_RANDOM:
*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
- bpf_user_rnd_init_once();
break;
}
break;
--
2.38.0


2022-12-12 18:31:20

by Yonghong Song

[permalink] [raw]
Subject: Re: [PATCH 2/5] Replace invocation of weak PRNG in kernel/bpf/core.c



On 12/11/22 2:16 PM, [email protected] wrote:
> From: David <[email protected]>
>
> We changed the invocation of
> prandom_u32_state to get_random_u32.
> We deleted the maintained state,
> which was a CPU-variable,
> since get_random_u32 maintains its own CPU-variable.
> We also deleted the state initializer,
> since it is not needed anymore.
>
> Signed-off-by: David <[email protected]>
> ---
> include/linux/bpf.h | 1 -
> kernel/bpf/core.c | 13 +------------
> kernel/bpf/verifier.c | 2 --
> net/core/filter.c | 1 -
> 4 files changed, 1 insertion(+), 16 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index c1bd1bd10..0689520b9 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2572,7 +2572,6 @@ const struct bpf_func_proto *tracing_prog_func_proto(
> enum bpf_func_id func_id, const struct bpf_prog *prog);
>
> /* Shared helpers among cBPF and eBPF. */
> -void bpf_user_rnd_init_once(void);
> u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
> u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 4cb5421d9..a6f06894e 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -2579,13 +2579,6 @@ void bpf_prog_free(struct bpf_prog *fp)
> }
> EXPORT_SYMBOL_GPL(bpf_prog_free);
>
> -/* RNG for unpriviledged user space with separated state from prandom_u32(). */
> -static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
> -
> -void bpf_user_rnd_init_once(void)
> -{
> - prandom_init_once(&bpf_user_rnd_state);
> -}
>
> BPF_CALL_0(bpf_user_rnd_u32)
> {
> @@ -2595,12 +2588,8 @@ BPF_CALL_0(bpf_user_rnd_u32)
> * transformations. Register assignments from both sides are
> * different, f.e. classic always sets fn(ctx, A, X) here.
> */
> - struct rnd_state *state;
> u32 res;
> -
> - state = &get_cpu_var(bpf_user_rnd_state);
> - res = predictable_rng_prandom_u32_state(state);
> - put_cpu_var(bpf_user_rnd_state);
> + res = get_random_u32();
>
> return res;
> }

Please see the discussion here.
https://lore.kernel.org/bpf/[email protected]/
There is a performance concern with the above change.

> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 264b3dc71..9f22fb3fa 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -14049,8 +14049,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>
> if (insn->imm == BPF_FUNC_get_route_realm)
> prog->dst_needed = 1;
> - if (insn->imm == BPF_FUNC_get_prandom_u32)
> - bpf_user_rnd_init_once();
> if (insn->imm == BPF_FUNC_override_return)
> prog->kprobe_override = 1;
> if (insn->imm == BPF_FUNC_tail_call) {
> diff --git a/net/core/filter.c b/net/core/filter.c
> index bb0136e7a..7a595ac00 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -443,7 +443,6 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
> break;
> case SKF_AD_OFF + SKF_AD_RANDOM:
> *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
> - bpf_user_rnd_init_once();
> break;
> }
> break;

2022-12-12 22:48:56

by Jason A. Donenfeld

[permalink] [raw]
Subject: Re: [PATCH 2/5] Replace invocation of weak PRNG in kernel/bpf/core.c

On Tue, Dec 13, 2022 at 12:35:24AM +0200, Amit Klein wrote:
> On Mon, Dec 12, 2022 at 8:03 PM Yonghong Song <[email protected]> wrote:
> >
> >
> >
> > On 12/11/22 2:16 PM, [email protected] wrote:
> > > From: David <[email protected]>
> > >
> > > We changed the invocation of
> > > prandom_u32_state to get_random_u32.
> > > We deleted the maintained state,
> > > which was a CPU-variable,
> > > since get_random_u32 maintains its own CPU-variable.
> > > We also deleted the state initializer,
> > > since it is not needed anymore.
> > >
> > > Signed-off-by: David <[email protected]>
> > > ---
> > > include/linux/bpf.h | 1 -
> > > kernel/bpf/core.c | 13 +------------
> > > kernel/bpf/verifier.c | 2 --
> > > net/core/filter.c | 1 -
> > > 4 files changed, 1 insertion(+), 16 deletions(-)
> > >
> > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> [...]
> > Please see the discussion here.
> > https://lore.kernel.org/bpf/[email protected]/
> > There is a performance concern with the above change.
> >
>
> I see. How about using (in this instance only!) the SipHash-based
> solution which was the basis for prandom_u32() starting with commit
> c51f8f88d705 (v5.10-rc1) up until commit d4150779e60f (v5.19-rc1)?

Stop with this pseudo cryptographic garbage. Stop pushing this
everywhere. It was a hassle to undo this crap the first time around. The
last thing we need is to add it back.

Plus, there's no need for it either. I'll revisit the bpf patch if/when
it makes sense to do performance-wise.

Jason

2022-12-12 23:11:13

by Amit Klein

[permalink] [raw]
Subject: Re: [PATCH 2/5] Replace invocation of weak PRNG in kernel/bpf/core.c

On Mon, Dec 12, 2022 at 8:03 PM Yonghong Song <[email protected]> wrote:
>
>
>
> On 12/11/22 2:16 PM, [email protected] wrote:
> > From: David <[email protected]>
> >
> > We changed the invocation of
> > prandom_u32_state to get_random_u32.
> > We deleted the maintained state,
> > which was a CPU-variable,
> > since get_random_u32 maintains its own CPU-variable.
> > We also deleted the state initializer,
> > since it is not needed anymore.
> >
> > Signed-off-by: David <[email protected]>
> > ---
> > include/linux/bpf.h | 1 -
> > kernel/bpf/core.c | 13 +------------
> > kernel/bpf/verifier.c | 2 --
> > net/core/filter.c | 1 -
> > 4 files changed, 1 insertion(+), 16 deletions(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
[...]
> Please see the discussion here.
> https://lore.kernel.org/bpf/[email protected]/
> There is a performance concern with the above change.
>

I see. How about using (in this instance only!) the SipHash-based
solution which was the basis for prandom_u32() starting with commit
c51f8f88d705 (v5.10-rc1) up until commit d4150779e60f (v5.19-rc1)?
Copying the relevant snippets (minus comments, for brevity) from
/lib/random32.c and /include/linux/prandom.h from that era (the 32
random bits are generated by prandom_u32() at the bottom; I didn't
bother with initialization, and I don't know if the per_cpu logic is
needed here, or perhaps some other kind of locking, if any):


/*
 * One SipHash-style mixing round specialized to 32-bit words (the rol32
 * rotation constants 5/8/16/7/13/16 match the HSipHash variant that
 * lib/random32.c used between v5.10-rc1 and v5.19-rc1, per the message
 * above). Evaluates each argument multiple times — callers must pass
 * plain lvalues, never expressions with side effects.
 *
 * NOTE(review): the blank line before the closing ')' appears to be a
 * mail-client artifact; it is harmless inside a parenthesized macro but
 * worth normalizing if this snippet is ever applied. Comments are kept
 * outside the macro because a '//' comment ending in '\' would swallow
 * the continuation (line splicing runs before comment removal).
 */
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \

)

/*
 * Internal state of the SipHash-style generator: the four chaining
 * words mixed by PRND_SIPROUND. Declared as unsigned long (as in the
 * quoted lib/random32.c era code), so the word width follows the
 * platform even though siprand_u32() only returns 32 bits.
 */
struct siprand_state {
unsigned long v0;
unsigned long v1;
unsigned long v2;
unsigned long v3;
};

/*
 * One generator state per CPU, so concurrent callers on different CPUs
 * never share (or contend on) state. __latent_entropy asks the
 * latent-entropy GCC plugin to seed the initial image at build time;
 * the author's own note below questions whether it is needed here —
 * TODO confirm before applying.
 */
static DEFINE_PER_CPU(struct siprand_state, net_rand_state)
__latent_entropy; // do we actually need this? -AK

/*
 * Advance the per-state SipHash-style generator by two rounds and
 * return one 32-bit output word (v1 + v3).
 *
 * The chaining words are copied into locals so PRND_SIPROUND — which
 * evaluates its arguments many times — only ever touches plain lvalues,
 * and the updated values are written back afterwards.
 */
static inline u32 siprand_u32(struct siprand_state *s)
{
	unsigned long a = s->v0;
	unsigned long b = s->v1;
	unsigned long c = s->v2;
	unsigned long d = s->v3;

	PRND_SIPROUND(a, b, c, d);
	PRND_SIPROUND(a, b, c, d);

	s->v0 = a;
	s->v1 = b;
	s->v2 = c;
	s->v3 = d;

	return b + d;
}


/*
 * Return a pseudo-random 32-bit value from the calling CPU's
 * net_rand_state instance.
 *
 * get_cpu_ptr() pins the task to the current CPU (disables preemption)
 * for the duration of the state update; the tracepoint fires before the
 * state is released, matching the original ordering.
 */
u32 prandom_u32(void)
{
	struct siprand_state *s = get_cpu_ptr(&net_rand_state);
	u32 ret;

	ret = siprand_u32(s);
	trace_prandom_u32(ret);
	put_cpu_ptr(&net_rand_state);

	return ret;
}
EXPORT_SYMBOL(prandom_u32);