2022-10-29 00:00:40

by Jason A. Donenfeld

Subject: [PATCH] random: remove early archrandom abstraction

The arch_get_random*_early() abstraction is not completely useful and
adds complexity, because it's not a given that there will be no calls to
arch_get_random*() between random_init_early(), which uses
arch_get_random*_early(), and init_cpu_features(). During that gap,
crng_reseed() might be called, which uses arch_get_random*(), since it's
mostly not init code.

Instead we can test whether we're in the early phase in
arch_get_random*() itself, and in doing so avoid all ambiguity about
where we are. Fortunately, the only architecture that currently
implements arch_get_random*_early() also has an alternatives-based cpu
feature system, one flag of which determines whether the other flags
have been initialized. This makes it possible to do the early check with
zero cost once the system is initialized.

Signed-off-by: Jason A. Donenfeld <[email protected]>
---
Catalin - Though this touches arm64's archrandom.h, I intend to take
this through the random.git tree, if that's okay. I have other patches
that will build off of this one. -Jason

arch/arm64/include/asm/archrandom.h | 57 ++++++-----------------------
drivers/char/random.c | 4 +-
include/linux/random.h | 20 ----------
3 files changed, 14 insertions(+), 67 deletions(-)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 109e2a4454be..8a059a9033af 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -58,6 +58,16 @@ static inline bool __arm64_rndrrs(unsigned long *v)
return ok;
}

+static __always_inline bool __cpu_has_rng(void)
+{
+        if (!system_capabilities_finalized()) {
+                /* Open code as we run prior to the first call to cpufeature. */
+                unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+                return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
+        }
+        return cpus_have_const_cap(ARM64_HAS_RNG);
+}
+
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
/*
@@ -66,7 +76,7 @@ static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
return 1;
return 0;
}
@@ -108,53 +118,10 @@ static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, s
* reseeded after each invocation. This is not a 100% fit but good
* enough to implement this API if no other entropy source exists.
*/
- if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
+ if (__cpu_has_rng() && __arm64_rndrrs(v))
return 1;

return 0;
}

-static inline bool __init __early_cpu_has_rndr(void)
-{
- /* Open code as we run prior to the first call to cpufeature. */
- unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
- return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
-}
-
-static inline size_t __init __must_check
-arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (!max_longs)
- return 0;
-
- if (smccc_trng_available) {
- struct arm_smccc_res res;
-
- max_longs = min_t(size_t, 3, max_longs);
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
- if ((int)res.a0 >= 0) {
- switch (max_longs) {
- case 3:
- *v++ = res.a1;
- fallthrough;
- case 2:
- *v++ = res.a2;
- fallthrough;
- case 1:
- *v++ = res.a3;
- break;
- }
- return max_longs;
- }
- }
-
- if (__early_cpu_has_rndr() && __arm64_rndr(v))
- return 1;
-
- return 0;
-}
-#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
-
#endif /* _ASM_ARCHRANDOM_H */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6f323344d0b9..e3cf4f51ed58 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -813,13 +813,13 @@ void __init random_init_early(const char *command_line)
#endif

for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
- longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
- longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
diff --git a/include/linux/random.h b/include/linux/random.h
index 182780cafd45..2bdd3add3400 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -153,26 +153,6 @@ declare_get_random_var_wait(long, unsigned long)

#include <asm/archrandom.h>

-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_longs_early
-static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_longs(v, max_longs);
-}
-#endif
-
-#ifndef arch_get_random_longs_early
-static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_longs(v, max_longs);
-}
-#endif
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
--
2.38.1



2022-10-30 17:53:28

by Catalin Marinas

Subject: Re: [PATCH] random: remove early archrandom abstraction

On Sat, Oct 29, 2022 at 01:40:25AM +0200, Jason A. Donenfeld wrote:
> diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
> index 109e2a4454be..8a059a9033af 100644
> --- a/arch/arm64/include/asm/archrandom.h
> +++ b/arch/arm64/include/asm/archrandom.h
> @@ -58,6 +58,16 @@ static inline bool __arm64_rndrrs(unsigned long *v)
> return ok;
> }
>
> +static __always_inline bool __cpu_has_rng(void)
> +{
> +        if (!system_capabilities_finalized()) {
> +                /* Open code as we run prior to the first call to cpufeature. */
> +                unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
> +                return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
> +        }
> +        return cpus_have_const_cap(ARM64_HAS_RNG);
> +}

We need to be careful with this check as it is only valid on the CPU it
was called on. Is the result used only on this CPU and with
preemption disabled? We have big.LITTLE systems where CPUs may differ
and the ARM64_HAS_RNG feature may not be enabled once all the CPUs have
been initialised (capabilities finalised).

We could make this capability an ARM64_CPUCAP_BOOT_CPU_FEATURE, though
I'd have to check whether any systems in the wild have such mixed CPUs.
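
Concretely, any caller relying on the raw ID-register read would need
to keep the check and the subsequent RNDR use pinned to one CPU, along
the lines of (a hypothetical sketch, not from the patch; assumes the
archrandom.h helpers it adds):

        size_t get_random_longs_pinned(unsigned long *v)
        {
                size_t ret = 0;

                preempt_disable();      /* stay on this CPU for both steps */
                if (__cpu_has_rng() && __arm64_rndr(v))
                        ret = 1;        /* RNDR ran on the CPU we checked */
                preempt_enable();
                return ret;
        }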

--
Catalin

2022-10-30 21:31:01

by Jason A. Donenfeld

Subject: Re: [PATCH] random: remove early archrandom abstraction

Hi Catalin,

> > +static __always_inline bool __cpu_has_rng(void)
> > +{
> > +        if (!system_capabilities_finalized()) {
> > +                /* Open code as we run prior to the first call to cpufeature. */
> > +                unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
> > +                return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
> > +        }
> > +        return cpus_have_const_cap(ARM64_HAS_RNG);
> > +}
>
> We need to be careful with this check as it is only valid on the CPU it
> was called on. Is the result used only on this CPU and with
> preemption disabled? We have big.LITTLE systems where CPUs may differ
> and the ARM64_HAS_RNG feature may not be enabled once all the CPUs have
> been initialised (capabilities finalised).
>
> We could make this capability an ARM64_CPUCAP_BOOT_CPU_FEATURE, though
> I'd have to check whether any systems in the wild have such mixed CPUs.

This occurred to me too and I wasn't quite sure how the interaction
worked out. It sounds like system_capabilities_finalized() might still
be false when SMP brings up other cores? In that case, I guess we just
have to make sure the system is still booting / in single CPU mode,
before interrupts have been enabled. This should be straightforward to
do; I'll send a v2.

Jason

2022-10-30 21:31:21

by Jason A. Donenfeld

Subject: [PATCH v2] random: remove early archrandom abstraction

The arch_get_random*_early() abstraction is not completely useful and
adds complexity, because it's not a given that there will be no calls to
arch_get_random*() between random_init_early(), which uses
arch_get_random*_early(), and init_cpu_features(). During that gap,
crng_reseed() might be called, which uses arch_get_random*(), since it's
mostly not init code.

Instead we can test whether we're in the early phase in
arch_get_random*() itself, and in doing so avoid all ambiguity about
where we are. Fortunately, the only architecture that currently
implements arch_get_random*_early() also has an alternatives-based cpu
feature system, one flag of which determines whether the other flags
have been initialized. This makes it possible to do the early check with
zero cost once the system is initialized.

Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Jean-Philippe Brucker <[email protected]>
Signed-off-by: Jason A. Donenfeld <[email protected]>
---
Changes v1->v2:
- Also check early_boot_irqs_disabled, to make sure that the raw
capability check only runs during an early stage when we're only
running on the boot CPU and with IRQs off. This check disappears once
the system is up, because system_capabilities_finalized() is a static
branch.

Catalin - Though this touches arm64's archrandom.h, I intend to take
this through the random.git tree, if that's okay. I have other patches
that will build off of this one. -Jason

arch/arm64/include/asm/archrandom.h | 57 ++++++-----------------------
drivers/char/random.c | 4 +-
include/linux/random.h | 20 ----------
3 files changed, 14 insertions(+), 67 deletions(-)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 109e2a4454be..4a68621078ab 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -58,6 +58,16 @@ static inline bool __arm64_rndrrs(unsigned long *v)
return ok;
}

+static __always_inline bool __cpu_has_rng(void)
+{
+        if (!system_capabilities_finalized() && early_boot_irqs_disabled) {
+                /* Open code as we run prior to the first call to cpufeature. */
+                unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+                return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
+        }
+        return cpus_have_const_cap(ARM64_HAS_RNG);
+}
+
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
/*
@@ -66,7 +76,7 @@ static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
return 1;
return 0;
}
@@ -108,53 +118,10 @@ static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, s
* reseeded after each invocation. This is not a 100% fit but good
* enough to implement this API if no other entropy source exists.
*/
- if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
- return 1;
-
- return 0;
-}
-
-static inline bool __init __early_cpu_has_rndr(void)
-{
- /* Open code as we run prior to the first call to cpufeature. */
- unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
- return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
-}
-
-static inline size_t __init __must_check
-arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (!max_longs)
- return 0;
-
- if (smccc_trng_available) {
- struct arm_smccc_res res;
-
- max_longs = min_t(size_t, 3, max_longs);
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
- if ((int)res.a0 >= 0) {
- switch (max_longs) {
- case 3:
- *v++ = res.a1;
- fallthrough;
- case 2:
- *v++ = res.a2;
- fallthrough;
- case 1:
- *v++ = res.a3;
- break;
- }
- return max_longs;
- }
- }
-
- if (__early_cpu_has_rndr() && __arm64_rndr(v))
+ if (__cpu_has_rng() && __arm64_rndrrs(v))
return 1;

return 0;
}
-#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early

#endif /* _ASM_ARCHRANDOM_H */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6f323344d0b9..e3cf4f51ed58 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -813,13 +813,13 @@ void __init random_init_early(const char *command_line)
#endif

for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
- longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
- longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
diff --git a/include/linux/random.h b/include/linux/random.h
index 182780cafd45..2bdd3add3400 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -153,26 +153,6 @@ declare_get_random_var_wait(long, unsigned long)

#include <asm/archrandom.h>

-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_longs_early
-static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_longs(v, max_longs);
-}
-#endif
-
-#ifndef arch_get_random_longs_early
-static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_longs(v, max_longs);
-}
-#endif
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
--
2.38.1


2022-10-31 10:36:44

by Jason A. Donenfeld

Subject: [PATCH v3] random: remove early archrandom abstraction

The arch_get_random*_early() abstraction is not completely useful and
adds complexity, because it's not a given that there will be no calls to
arch_get_random*() between random_init_early(), which uses
arch_get_random*_early(), and init_cpu_features(). During that gap,
crng_reseed() might be called, which uses arch_get_random*(), since it's
mostly not init code.

Instead we can test whether we're in the early phase in
arch_get_random*() itself, and in doing so avoid all ambiguity about
where we are. Fortunately, the only architecture that currently
implements arch_get_random*_early() also has an alternatives-based cpu
feature system, one flag of which determines whether the other flags
have been initialized. This makes it possible to do the early check with
zero cost once the system is initialized.

Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Jean-Philippe Brucker <[email protected]>
Signed-off-by: Jason A. Donenfeld <[email protected]>
---
Changes v2->v3:
- Keep around __early_cpu_has_rndr() for kaslr usage.
Changes v1->v2:
- Also check early_boot_irqs_disabled, to make sure that the raw
capability check only runs during an early stage when we're only
running on the boot CPU and with IRQs off. This check disappears once
the system is up, because system_capabilities_finalized() is a static
branch.

Catalin - Though this touches arm64's archrandom.h, I intend to take
this through the random.git tree, if that's okay. I have other patches
that will build off of this one. -Jason

arch/arm64/include/asm/archrandom.h | 61 ++++++++---------------------
drivers/char/random.c | 4 +-
include/linux/random.h | 20 ----------
3 files changed, 18 insertions(+), 67 deletions(-)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 109e2a4454be..4b0f28730ab2 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -58,6 +58,20 @@ static inline bool __arm64_rndrrs(unsigned long *v)
return ok;
}

+static inline bool __early_cpu_has_rndr(void)
+{
+        /* Open code as we run prior to the first call to cpufeature. */
+        unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+        return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
+}
+
+static __always_inline bool __cpu_has_rng(void)
+{
+        if (!system_capabilities_finalized() && early_boot_irqs_disabled)
+                return __early_cpu_has_rndr();
+        return cpus_have_const_cap(ARM64_HAS_RNG);
+}
+
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
/*
@@ -66,7 +80,7 @@ static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
return 1;
return 0;
}
@@ -108,53 +122,10 @@ static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, s
* reseeded after each invocation. This is not a 100% fit but good
* enough to implement this API if no other entropy source exists.
*/
- if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
- return 1;
-
- return 0;
-}
-
-static inline bool __init __early_cpu_has_rndr(void)
-{
- /* Open code as we run prior to the first call to cpufeature. */
- unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
- return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
-}
-
-static inline size_t __init __must_check
-arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (!max_longs)
- return 0;
-
- if (smccc_trng_available) {
- struct arm_smccc_res res;
-
- max_longs = min_t(size_t, 3, max_longs);
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
- if ((int)res.a0 >= 0) {
- switch (max_longs) {
- case 3:
- *v++ = res.a1;
- fallthrough;
- case 2:
- *v++ = res.a2;
- fallthrough;
- case 1:
- *v++ = res.a3;
- break;
- }
- return max_longs;
- }
- }
-
- if (__early_cpu_has_rndr() && __arm64_rndr(v))
+ if (__cpu_has_rng() && __arm64_rndrrs(v))
return 1;

return 0;
}
-#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early

#endif /* _ASM_ARCHRANDOM_H */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6f323344d0b9..e3cf4f51ed58 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -813,13 +813,13 @@ void __init random_init_early(const char *command_line)
#endif

for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
- longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
- longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
diff --git a/include/linux/random.h b/include/linux/random.h
index 182780cafd45..2bdd3add3400 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -153,26 +153,6 @@ declare_get_random_var_wait(long, unsigned long)

#include <asm/archrandom.h>

-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_longs_early
-static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_longs(v, max_longs);
-}
-#endif
-
-#ifndef arch_get_random_longs_early
-static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
-{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_longs(v, max_longs);
-}
-#endif
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
--
2.38.1


2022-11-01 11:59:44

by Catalin Marinas

Subject: Re: [PATCH v3] random: remove early archrandom abstraction

On Mon, Oct 31, 2022 at 11:28:40AM +0100, Jason A. Donenfeld wrote:
> Catalin - Though this touches arm64's archrandom.h, I intend to take
> this through the random.git tree, if that's okay. I have other patches
> that will build off of this one. -Jason

I'm fine with the patch going through your tree but I have a comment
below.

> diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
> index 109e2a4454be..4b0f28730ab2 100644
> --- a/arch/arm64/include/asm/archrandom.h
> +++ b/arch/arm64/include/asm/archrandom.h
> @@ -58,6 +58,20 @@ static inline bool __arm64_rndrrs(unsigned long *v)
> return ok;
> }
>
> +static inline bool __early_cpu_has_rndr(void)
> +{
> +        /* Open code as we run prior to the first call to cpufeature. */
> +        unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
> +        return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
> +}
> +
> +static __always_inline bool __cpu_has_rng(void)
> +{
> +        if (!system_capabilities_finalized() && early_boot_irqs_disabled)
> +                return __early_cpu_has_rndr();
> +        return cpus_have_const_cap(ARM64_HAS_RNG);
> +}

I'm not sure about using early_boot_irqs_disabled; it is described as a
debug helper. It's also set to 'false' before
system_capabilities_finalized() becomes true (once full SMP is enabled).

Would something like this work:

        if (system_capabilities_finalized())
                return cpus_have_final_cap(ARM64_HAS_RNG);
        if (!preemptible())
                return __early_cpu_has_rndr();
        return false;

We also have a this_cpu_has_cap() function, though it's likely more
expensive than the hand-coded __early_cpu_has_rndr() (if we care about
performance here).
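
A variant built on it might look like this (only a sketch; the exact
structure is an assumption, not something I've tested):

        static __always_inline bool __cpu_has_rng(void)
        {
                if (system_capabilities_finalized())
                        return cpus_have_final_cap(ARM64_HAS_RNG);
                if (!preemptible())
                        return this_cpu_has_cap(ARM64_HAS_RNG);
                return false;
        }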

--
Catalin

2022-11-01 12:27:33

by Jason A. Donenfeld

Subject: Re: [PATCH v3] random: remove early archrandom abstraction

Hi Catalin,

On Tue, Nov 1, 2022 at 12:39 PM Catalin Marinas <[email protected]> wrote:
> > +static __always_inline bool __cpu_has_rng(void)
> > +{
> > +        if (!system_capabilities_finalized() && early_boot_irqs_disabled)
> > +                return __early_cpu_has_rndr();
> > +        return cpus_have_const_cap(ARM64_HAS_RNG);
> > +}
>
> I'm not sure about using early_boot_irqs_disabled; it is described as a
> debug helper.

Not sure that part matters much?

> It's also set to 'false' before
> system_capabilities_finalized() becomes true (once full SMP is enabled).

Right, so there's still a "hole", where we'll fall back to
cpus_have_final_cap(), which might return false. In practice I don't
think this matters much. But it's still not perfect.

>
> Would something like this work:
>
>         if (system_capabilities_finalized())
>                 return cpus_have_final_cap(ARM64_HAS_RNG);
>         if (!preemptible())
>                 return __early_cpu_has_rndr();
>         return false;

That'd be fine. Of course that introduces a different sort of "hole",
when it's called from preemptible context. But again, that doesn't
matter in practice. So I'll send you a v4 doing that for you to ack.

I'm going to structure it like this, though:

static __always_inline bool __cpu_has_rng(void)
{
        if (!system_capabilities_finalized() && !preemptible())
                return __early_cpu_has_rndr();
        return cpus_have_const_cap(ARM64_HAS_RNG);
}

Because cpus_have_const_cap() itself has a fallback mode before
system_capabilities_finalized() is true, where it checks a big
bitmask. So that seems like a better fallback than `false`, in case it
happens to be true.
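
For reference, cpus_have_const_cap() currently behaves roughly like
this (paraphrased and simplified from
arch/arm64/include/asm/cpufeature.h):

        static __always_inline bool cpus_have_const_cap(int num)
        {
                if (system_capabilities_finalized())
                        return __cpus_have_const_cap(num); /* patched static branch */
                return cpus_have_cap(num); /* test_bit() in the cpu_hwcaps bitmask */
        }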

Jason