This patchset adds a control function to the cpufeature framework
so that a feature can be controlled at runtime.
Defer enabling PAC on the boot core and use the newly added filter
function to disable PAC from the command line. This helps toggle the
feature on systems that do not support PAC, or where PAC needs to be
disabled at runtime, without modifying the core kernel.
The idea of adding the filter function for cpufeature is taken from
https://lore.kernel.org/linux-arm-kernel/[email protected]/
https://lore.kernel.org/linux-arm-kernel/[email protected]/
Srinivas Ramana (3):
arm64: Defer enabling pointer authentication on boot core
arm64: cpufeature: Add a filter function to cpufeature
arm64: Enable control of pointer authentication using early param
Documentation/admin-guide/kernel-parameters.txt | 6 +++
arch/arm64/include/asm/cpufeature.h | 8 +++-
arch/arm64/include/asm/pointer_auth.h | 10 +++++
arch/arm64/include/asm/stackprotector.h | 1 +
arch/arm64/kernel/cpufeature.c | 53 +++++++++++++++++++------
arch/arm64/kernel/head.S | 4 --
6 files changed, 64 insertions(+), 18 deletions(-)
--
2.7.4
Add support for turning off pointer authentication using a kernel
command line early param.
This allows the pointer authentication feature to be controlled for
both kernel and userspace without requiring kernel changes.
Signed-off-by: Ajay Patil <[email protected]>
Signed-off-by: Prasad Sodagudi <[email protected]>
Signed-off-by: Srinivas Ramana <[email protected]>
---
Documentation/admin-guide/kernel-parameters.txt | 6 ++++
arch/arm64/kernel/cpufeature.c | 38 +++++++++++++++++++------
2 files changed, 36 insertions(+), 8 deletions(-)
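A quick way to sanity-check the new parameter from userspace (not part
of this patch) is to look at the hwcaps: with arm64.disable_ptr_auth=1
on the command line, HWCAP_PACA/HWCAP_PACG should no longer be
advertised. A minimal sketch, assuming an arm64 toolchain:

/*
 * Sketch: check whether address/generic authentication is advertised
 * to userspace. HWCAP_PACA/HWCAP_PACG come from the arm64 uapi
 * <asm/hwcap.h> header.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("PACA: %s, PACG: %s\n",
	       (hwcap & HWCAP_PACA) ? "yes" : "no",
	       (hwcap & HWCAP_PACG) ? "yes" : "no");
	return 0;
}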
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c722ec19cd00..d6855e0a9085 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -373,6 +373,12 @@
arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
Format: <io>,<irq>,<nodeID>
+ arm64.disable_ptr_auth=
+ [ARM64] Force disable Linux support for address
+ authentication (both user and in-kernel).
+ 0 - Pointer authentication is enabled [Default]
+ 1 - Pointer authentication is force disabled
+
ataflop= [HW,M68k]
atarimouse= [HW,MOUSE] Atari Mouse
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b2ffa9eaaaff..bdaaff78240b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -164,6 +164,24 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
static bool __system_matches_cap(unsigned int n);
+#ifdef CONFIG_ARM64_PTR_AUTH
+bool arm64_disable_ptr_auth;
+
+static int __init arm64_disable_ptr_auth_fn(char *buf)
+{
+ return strtobool(buf, &arm64_disable_ptr_auth);
+}
+early_param("arm64.disable_ptr_auth", arm64_disable_ptr_auth_fn);
+
+s64 ptr_auth_ftr_filter(const struct arm64_ftr_bits *ftrp, s64 val)
+{
+ if (arm64_disable_ptr_auth)
+ return 0;
+ else
+ return val;
+}
+#endif
+
/*
* NOTE: Any changes to the visibility of features should be kept in
* sync with the documentation of the CPU feature register ABI.
@@ -193,17 +211,21 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
+#ifdef CONFIG_ARM64_PTR_AUTH
+ FILTERED_ARM64_FTR_BITS(FTR_UNSIGNED, FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
+ ID_AA64ISAR1_GPI_SHIFT, 4, 0, ptr_auth_ftr_filter),
+ FILTERED_ARM64_FTR_BITS(FTR_UNSIGNED, FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
+ ID_AA64ISAR1_GPA_SHIFT, 4, 0, ptr_auth_ftr_filter),
+#endif
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+#ifdef CONFIG_ARM64_PTR_AUTH
+ FILTERED_ARM64_FTR_BITS(FTR_UNSIGNED, FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
+ ID_AA64ISAR1_API_SHIFT, 4, 0, ptr_auth_ftr_filter),
+ FILTERED_ARM64_FTR_BITS(FTR_UNSIGNED, FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
+ ID_AA64ISAR1_APA_SHIFT, 4, 0, ptr_auth_ftr_filter),
+#endif
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
--
2.7.4
Defer enabling pointer authentication on the boot core until it is
required to be enabled by the cpufeature framework.
This will help in controlling the feature dynamically
with a boot parameter.
Signed-off-by: Ajay Patil <[email protected]>
Signed-off-by: Prasad Sodagudi <[email protected]>
Signed-off-by: Srinivas Ramana <[email protected]>
---
arch/arm64/include/asm/pointer_auth.h | 10 ++++++++++
arch/arm64/include/asm/stackprotector.h | 1 +
arch/arm64/kernel/head.S | 4 ----
3 files changed, 11 insertions(+), 4 deletions(-)
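For reference, ptrauth_enable() below relies on the kernel's
sysreg_clear_set() read-modify-write helper to set the SCTLR_EL1
key-enable bits. A standalone sketch of that clear/set step on a plain
value (the SCTLR_EL1 contents and macro names are illustrative; the
EnIA/EnIB/EnDA/EnDB bit positions are taken from the ARM ARM):

/*
 * Illustration of the clear/set step ptrauth_enable() performs on
 * SCTLR_EL1 via sysreg_clear_set(); the register value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SCTLR_ENIA	(1ULL << 31)	/* enable PAC with APIAKey */
#define SCTLR_ENIB	(1ULL << 30)	/* enable PAC with APIBKey */
#define SCTLR_ENDA	(1ULL << 27)	/* enable PAC with APDAKey */
#define SCTLR_ENDB	(1ULL << 13)	/* enable PAC with APDBKey */

static uint64_t clear_set(uint64_t val, uint64_t clear, uint64_t set)
{
	return (val & ~clear) | set;
}

int main(void)
{
	uint64_t sctlr = 0x30d00800ULL;	/* hypothetical SCTLR_EL1 contents */

	sctlr = clear_set(sctlr, 0,
			  SCTLR_ENIA | SCTLR_ENIB | SCTLR_ENDA | SCTLR_ENDB);
	printf("SCTLR_EL1 with PAC key enables set: %#llx\n",
	       (unsigned long long)sctlr);
	return 0;
}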
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
return ptrauth_clear_pac(ptr);
}
+static __always_inline void ptrauth_enable(void)
+{
+ if (!system_supports_address_auth())
+ return;
+ sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+ isb();
+}
+
#define ptrauth_thread_init_user(tsk) \
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
#define ptrauth_thread_init_kernel(tsk) \
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
#else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
#endif
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
+ ptrauth_enable();
}
#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a0dc987724ed..83d3929e0e8b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
adr_l x5, init_task
msr sp_el0, x5 // Save thread_info
-#ifdef CONFIG_ARM64_PTR_AUTH
- __ptrauth_keys_init_cpu x5, x6, x7, x8
-#endif
-
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
--
2.7.4
Add a filter function to the cpufeature framework so that it can be
used when dynamic control of a feature is required.
Signed-off-by: Ajay Patil <[email protected]>
Signed-off-by: Prasad Sodagudi <[email protected]>
Signed-off-by: Srinivas Ramana <[email protected]>
---
arch/arm64/include/asm/cpufeature.h | 8 +++++++-
arch/arm64/kernel/cpufeature.c | 15 ++++++++++-----
2 files changed, 17 insertions(+), 6 deletions(-)
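To make the shape of the new hook concrete, here is a standalone,
simplified sketch of the pattern this patch adds to arm64_ftr_value()
(plain userspace C; the struct and field layout are illustrative, not
the kernel definitions):

/*
 * Simplified filter-hook pattern: when a filter is present, it gets
 * the last word on the value extracted from an ID register field.
 */
#include <stdint.h>
#include <stdio.h>

struct ftr_bits {
	uint8_t shift;
	uint8_t width;
	int64_t (*filter)(const struct ftr_bits *ftrp, int64_t fval);
};

static int64_t force_zero(const struct ftr_bits *ftrp, int64_t fval)
{
	return 0;	/* pretend the feature is not implemented */
}

static int64_t ftr_value(const struct ftr_bits *ftrp, uint64_t reg)
{
	int64_t fval = (reg >> ftrp->shift) & ((1ULL << ftrp->width) - 1);

	if (ftrp->filter)
		fval = ftrp->filter(ftrp, fval);
	return fval;
}

int main(void)
{
	struct ftr_bits apa = { .shift = 4, .width = 4, .filter = force_zero };

	/* APA = 1 in the raw register value, but the filter hides it */
	printf("filtered APA field: %lld\n", (long long)ftr_value(&apa, 0x10));
	return 0;
}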
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9a555809b89c..81a5c97d647d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -61,6 +61,7 @@ struct arm64_ftr_bits {
u8 shift;
u8 width;
s64 safe_val; /* safe value for FTR_EXACT features */
+ s64 (*filter)(const struct arm64_ftr_bits *ftrp, s64 fval);
};
/*
@@ -566,7 +567,12 @@ cpuid_feature_extract_field(u64 features, int field, bool sign)
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
- return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
+ s64 fval = (s64)cpuid_feature_extract_field_width(val, ftrp->shift,
+ ftrp->width, ftrp->sign);
+
+ if (ftrp->filter)
+ fval = ftrp->filter(ftrp, fval);
+ return fval;
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7ffb5f1d8b68..b2ffa9eaaaff 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -132,23 +132,28 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- { \
.sign = SIGNED, \
.visible = VISIBLE, \
.strict = STRICT, \
.type = TYPE, \
.shift = SHIFT, \
.width = WIDTH, \
- .safe_val = SAFE_VAL, \
- }
+ .safe_val = SAFE_VAL
/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+ {__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL), }
/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+ {__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL), }
+
+/* Define a feature with a filter function to process the field value */
+#define FILTERED_ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL, filter_fn) \
+ { \
+ __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL), \
+ .filter = filter_fn, \
+ }
#define ARM64_FTR_END \
{ \
--
2.7.4
Hi Srinivas,
On 2021-01-09 00:29, Srinivas Ramana wrote:
> This patchset adds a control function for cpufeature framework
> so that the feature can be controlled at runtime.
>
> Defer PAC on boot core and use the filter function added to disable
> PAC from command line. This will help toggling the feature on systems
> that do not support PAC or where PAC needs to be disabled at runtime,
> without modifying the core kernel.
>
> The idea of adding the filter function for cpufeature is taken from
> https://lore.kernel.org/linux-arm-kernel/[email protected]/
> https://lore.kernel.org/linux-arm-kernel/[email protected]/
>
> Srinivas Ramana (3):
> arm64: Defer enabling pointer authentication on boot core
> arm64: cpufeature: Add a filter function to cpufeature
> arm64: Enable control of pointer authentication using early param
>
> Documentation/admin-guide/kernel-parameters.txt | 6 +++
> arch/arm64/include/asm/cpufeature.h | 8 +++-
> arch/arm64/include/asm/pointer_auth.h | 10 +++++
> arch/arm64/include/asm/stackprotector.h | 1 +
> arch/arm64/kernel/cpufeature.c | 53
> +++++++++++++++++++------
> arch/arm64/kernel/head.S | 4 --
> 6 files changed, 64 insertions(+), 18 deletions(-)
I've been working for some time on a similar series to allow a feature
set to be disabled during the early boot phase, initially to prevent
booting a kernel with VHE, but the mechanism is generic enough to
deal with most architectural features.
I took the liberty to lift your first patch and to add it to my
series[1],
further allowing PAuth to be disabled at boot time on top of BTI and
VHE.
I'd appreciate your comments on this.
Thanks,
M.
[1] https://lore.kernel.org/r/[email protected]
--
Jazz is not dead. It just smells funny...
Hi Marc,
On 1/11/2021 5:40 AM, Marc Zyngier wrote:
> Hi Srinivas,
>
> On 2021-01-09 00:29, Srinivas Ramana wrote:
>> This patchset adds a control function for cpufeature framework
>> so that the feature can be controlled at runtime.
>>
>> Defer PAC on boot core and use the filter function added to disable
>> PAC from command line. This will help toggling the feature on systems
>> that do not support PAC or where PAC needs to be disabled at runtime,
>> without modifying the core kernel.
>>
>> The idea of adding the filter function for cpufeature is taken from
>> https://lore.kernel.org/linux-arm-kernel/[email protected]/
>>
>> https://lore.kernel.org/linux-arm-kernel/[email protected]/
>>
>>
>> Srinivas Ramana (3):
>> arm64: Defer enabling pointer authentication on boot core
>> arm64: cpufeature: Add a filter function to cpufeature
>> arm64: Enable control of pointer authentication using early param
>>
>> Documentation/admin-guide/kernel-parameters.txt | 6 +++
>> arch/arm64/include/asm/cpufeature.h | 8 +++-
>> arch/arm64/include/asm/pointer_auth.h | 10 +++++
>> arch/arm64/include/asm/stackprotector.h | 1 +
>> arch/arm64/kernel/cpufeature.c | 53
>> +++++++++++++++++++------
>> arch/arm64/kernel/head.S | 4 --
>> 6 files changed, 64 insertions(+), 18 deletions(-)
>
> I've been working for some time on a similar series to allow a feature
> set to be disabled during the early boot phase, initially to prevent
> booting a kernel with VHE, but the mechanism is generic enough to
> deal with most architectural features.
>
> I took the liberty to lift your first patch and to add it to my
> series[1],
> further allowing PAuth to be disabled at boot time on top of BTI and VHE.
>
> I'd appreciate your comments on this.
Thanks for sending this series. It seems to be more flexible compared
to what we did.
I am following your discussion on allowing EXACT ftr_reg values.
Btw, do you have a plan to add MTE along similar lines to control the feature?
We may need this on some systems.
>
> Thanks,
>
> M.
>
> [1] https://lore.kernel.org/r/[email protected]
Thanks,
-- Srinivas R
On 2021-01-14 07:15, Srinivas Ramana wrote:
> Hi Marc,
>
> On 1/11/2021 5:40 AM, Marc Zyngier wrote:
>> Hi Srinivas,
>>
>> On 2021-01-09 00:29, Srinivas Ramana wrote:
>>> This patchset adds a control function for cpufeature framework
>>> so that the feature can be controlled at runtime.
>>>
>>> Defer PAC on boot core and use the filter function added to disable
>>> PAC from command line. This will help toggling the feature on systems
>>> that do not support PAC or where PAC needs to be disabled at runtime,
>>> without modifying the core kernel.
>>>
>>> The idea of adding the filter function for cpufeature is taken from
>>> https://lore.kernel.org/linux-arm-kernel/[email protected]/
>>> https://lore.kernel.org/linux-arm-kernel/[email protected]/
>>> Srinivas Ramana (3):
>>> arm64: Defer enabling pointer authentication on boot core
>>> arm64: cpufeature: Add a filter function to cpufeature
>>> arm64: Enable control of pointer authentication using early param
>>>
>>> Documentation/admin-guide/kernel-parameters.txt | 6 +++
>>> arch/arm64/include/asm/cpufeature.h | 8 +++-
>>> arch/arm64/include/asm/pointer_auth.h | 10 +++++
>>> arch/arm64/include/asm/stackprotector.h | 1 +
>>> arch/arm64/kernel/cpufeature.c | 53
>>> +++++++++++++++++++------
>>> arch/arm64/kernel/head.S | 4 --
>>> 6 files changed, 64 insertions(+), 18 deletions(-)
>>
>> I've been working for some time on a similar series to allow a feature
>> set to be disabled during the early boot phase, initially to prevent
>> booting a kernel with VHE, but the mechanism is generic enough to
>> deal with most architectural features.
>>
>> I took the liberty to lift your first patch and to add it to my
>> series[1],
>> further allowing PAuth to be disabled at boot time on top of BTI and
>> VHE.
>>
>> I'd appreciate your comments on this.
> Thanks for sending this series. It seems to be more flexible compared
> to what we did.
> I am following your discussion on allowing EXACT ftr_reg values.
>
>
> Btw, do you have a plan to add MTE along similar lines to control the
> feature?
> We may need this on some systems.
I don't have any need for this at the moment, as my initial goal was
to enable a different boot flow for VHE. The BTI "support" was added
as a way to demonstrate the use of __read_sysreg_by_encoding(), and
your patches were a good opportunity to converge on a single solution.
But if you write the patches that do that, I can add them to the series,
and Catalin/Will can decide whether they want to take them.
Thanks,
M.
--
Jazz is not dead. It just smells funny...
On Thu, Jan 14, 2021 at 08:20:52AM +0000, Marc Zyngier wrote:
> On 2021-01-14 07:15, Srinivas Ramana wrote:
> > On 1/11/2021 5:40 AM, Marc Zyngier wrote:
> > > On 2021-01-09 00:29, Srinivas Ramana wrote:
> > > > This patchset adds a control function for cpufeature framework
> > > > so that the feature can be controlled at runtime.
> > > >
> > > > Defer PAC on boot core and use the filter function added to disable
> > > > PAC from command line. This will help toggling the feature on systems
> > > > that do not support PAC or where PAC needs to be disabled at runtime,
> > > > without modifying the core kernel.
[...]
> > > I've been working for some time on a similar series to allow a feature
> > > set to be disabled during the early boot phase, initially to prevent
> > > booting a kernel with VHE, but the mechanism is generic enough to
> > > deal with most architectural features.
> > >
> > > I took the liberty to lift your first patch and to add it to my
> > > series[1],
> > > further allowing PAuth to be disabled at boot time on top of BTI and
> > > VHE.
> > >
> > > I'd appreciate your comments on this.
> >
> > Thanks for sending this series. It seems to be more flexible compared
> > to what we did.
> > I am following your discussion on allowing EXACT ftr_reg values.
> >
> > Btw, do you have a plan to add MTE along similar lines to control the
> > feature?
> > We may need this on some systems.
>
> I don't have any need for this at the moment, as my initial goal was
> to enable a different boot flow for VHE. The BTI "support" was added
> as a way to demonstrate the use of __read_sysreg_by_encoding(), and
> your patches were a good opportunity to converge on a single solution.
>
> But if you write the patches that do that, I can add them to the series,
> and Catalin/Will can decide whether they want to take them.
For MTE it's trickier (probably similar to VHE) as we do the setup early
in proc.S before we hit the cpufeature infrastructure.
So far we haven't agreed on what disabling MTE means - is it disabled
completely (no Normal Tagged memory type) or do we just need to disable
tag checking? The former is required if we expect buggy hardware
(SoC-level, not necessarily CPU). The latter, at least for the kernel,
is already handled via the kasan.mode cmdline. For user, we can disable
the tagged address ABI via sysctl (or kernel cmdline) and it indirectly
disables MTE since the C library detects this.
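For example, a task that opts into the tagged address ABI sees the
opt-in fail once the ABI is disabled via the sysctl, which is what the
C library keys off. A minimal sketch (PR_SET_TAGGED_ADDR_CTRL and
PR_TAGGED_ADDR_ENABLE are from <linux/prctl.h>, duplicated below in
case the libc headers lack them):

/*
 * Sketch: with abi.tagged_addr_disabled=1, the prctl() below fails,
 * so a libc that keys MTE off this prctl would not enable tag
 * checking either.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		printf("tagged address ABI unavailable: %s\n", strerror(errno));
	else
		printf("tagged address ABI enabled for this task\n");
	return 0;
}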
--
Catalin