During the handling of arm64 hardware memory errors (do_sea()), if the error
is consumed in the kernel, the current behaviour is to panic. That is not
optimal. Take uaccess as an example: if a uaccess operation fails because of
a memory error, only the user process is affected, so killing the user
process and isolating the faulty user page is the better choice.

This patch only enables the machine check framework: it adds an exception
fixup before the kernel panic in do_sea(), and limits it to hardware memory
errors consumed in kernel mode that were triggered by a user-mode process.
If the fixup succeeds, the panic is avoided.

Consistent with PPC/x86, this is implemented via CONFIG_ARCH_HAS_COPY_MC.

Also add copy_mc_to_user() to include/linux/uaccess.h; this helper is used
when CONFIG_ARCH_HAS_COPY_MC is enabled.
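
For illustration, a hypothetical caller (not part of this patch) that must
survive a memory error while copying kernel data to user space would use the
new helper roughly like this, assuming the copy is covered by an
EX_TYPE_UACCESS_MC fixup:

	unsigned long left;

	/* returns the number of bytes that could not be copied */
	left = copy_mc_to_user(ubuf, kbuf, len);
	if (left) {
		/*
		 * On a hardware memory error the access is fixed up in
		 * do_sea(): the current task gets a SIGBUS instead of
		 * the whole machine panicking.
		 */
		return -EFAULT;
	}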
Signed-off-by: Tong Tiangen <[email protected]>
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/extable.h | 1 +
arch/arm64/mm/extable.c | 18 ++++++++++++++++++
arch/arm64/mm/fault.c | 28 ++++++++++++++++++++++++++++
include/linux/uaccess.h | 8 ++++++++
5 files changed, 56 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d9325dd95eba..012e38309955 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -19,6 +19,7 @@ config ARM64
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
+ select ARCH_HAS_COPY_MC if ACPI_APEI_GHES
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 72b0e71cc3de..f80ebd0addfd 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
#endif /* !CONFIG_BPF_JIT */
bool fixup_exception(struct pt_regs *regs);
+bool fixup_exception_mc(struct pt_regs *regs);
#endif
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 489455309695..5de256a25464 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -9,6 +9,7 @@
#include <asm/asm-extable.h>
#include <asm/ptrace.h>
+#include <asm/esr.h>
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
@@ -73,6 +74,7 @@ bool fixup_exception(struct pt_regs *regs)
switch (ex->type) {
case EX_TYPE_FIXUP:
+ case EX_TYPE_UACCESS_MC:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
@@ -84,3 +86,19 @@ bool fixup_exception(struct pt_regs *regs)
BUG();
}
+
+bool fixup_exception_mc(struct pt_regs *regs)
+{
+ const struct exception_table_entry *ex;
+
+ ex = search_exception_tables(instruction_pointer(regs));
+ if (!ex)
+ return false;
+
+ switch (ex->type) {
+ case EX_TYPE_UACCESS_MC:
+ return ex_handler_fixup(ex, regs);
+ }
+
+ return false;
+}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 77341b160aca..56b13cf8bf1d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -695,6 +695,30 @@ static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
return 1; /* "fault" */
}
+static bool arm64_process_kernel_sea(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs, int sig, int code)
+{
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC))
+ return false;
+
+ if (user_mode(regs) || !current->mm)
+ return false;
+
+ if (apei_claim_sea(regs) < 0)
+ return false;
+
+ current->thread.fault_address = 0;
+ current->thread.fault_code = esr;
+
+ if (!fixup_exception_mc(regs))
+ return false;
+
+ arm64_force_sig_fault(sig, code, addr,
+ "Uncorrected hardware memory error in kernel-access\n");
+
+ return true;
+}
+
static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf;
@@ -720,6 +744,10 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
*/
siaddr = untagged_addr(far);
}
+
+ if (arm64_process_kernel_sea(siaddr, esr, regs, inf->sig, inf->code))
+ return 0;
+
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 546179418ffa..dd952aeecdc1 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -174,6 +174,14 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
}
#endif
+#ifndef copy_mc_to_user
+static inline unsigned long __must_check
+copy_mc_to_user(void *dst, const void *src, size_t cnt)
+{
+ return raw_copy_to_user(dst, src, cnt);
+}
+#endif
+
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
--
2.18.0.huawei.25
On 2022/4/12 21:08, Kefeng Wang wrote:
[...]
>> +
>> +bool fixup_exception_mc(struct pt_regs *regs)
>> +{
>> + const struct exception_table_entry *ex;
>> +
>> + ex = search_exception_tables(instruction_pointer(regs));
>> + if (!ex)
>> + return false;
>> +
>> + switch (ex->type) {
>> + case EX_TYPE_UACCESS_MC:
>> + return ex_handler_fixup(ex, regs);
>> + }
>> +
>> + return false;
>> +}
>
> The definition of EX_TYPE_UACCESS_MC is in patch4, please fix it, and if
> the arm64 exception table is sorted by exception type, we could drop
> fixup_exception_mc(), right?

OK, will fix that in the next version.

In sort_relative_table_with_data(), the table seems to be sorted by insn and
data, not by exception type, so fixup_exception_mc() is still needed.
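
For context, the build-time sort keys on the instruction address, roughly
like this conceptual comparator (a sketch only, not the exact
scripts/sorttable.c code):

	/* arm64 extable entry: insn/fixup are place-relative offsets */
	struct exception_table_entry {
		int insn, fixup;
		short type, data;
	};

	/*
	 * Conceptual sort key: the faulting instruction address (derived
	 * from the relative insn offset). ->type and ->data only travel
	 * along with the entry, so a lookup by exception type still needs
	 * a helper such as fixup_exception_mc().
	 */
	static int cmp_extable(const struct exception_table_entry *a,
			       const struct exception_table_entry *b)
	{
		if (a->insn < b->insn)
			return -1;
		if (a->insn > b->insn)
			return 1;
		return 0;
	}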
>
>> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
>> index 77341b160aca..56b13cf8bf1d 100644
>> --- a/arch/arm64/mm/fault.c
>> +++ b/arch/arm64/mm/fault.c
>> @@ -695,6 +695,30 @@ static int do_bad(unsigned long far, unsigned int
>> esr, struct pt_regs *regs)
>> return 1; /* "fault" */
>> }
>> +static bool arm64_process_kernel_sea(unsigned long addr, unsigned int
>> esr,
>> + struct pt_regs *regs, int sig, int code)
>> +{
>> + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC))
>> + return false;
>> +
>> + if (user_mode(regs) || !current->mm)
>> + return false;
>> +
>> + if (apei_claim_sea(regs) < 0)
>> + return false;
>> +
>> + current->thread.fault_address = 0;
>> + current->thread.fault_code = esr;
>> +
> Use set_thread_esr(0, esr) and move it after fixup_exception_mc();
>> + if (!fixup_exception_mc(regs))
>> + return false;
>> +
>> + arm64_force_sig_fault(sig, code, addr,
>> + "Uncorrected hardware memory error in kernel-access\n");
>> +
>> + return true;
>> +}
>> +
>> static int do_sea(unsigned long far, unsigned int esr, struct
>> pt_regs *regs)
>> {
>> const struct fault_info *inf;
>> @@ -720,6 +744,10 @@ static int do_sea(unsigned long far, unsigned int
>> esr, struct pt_regs *regs)
>> */
>> siaddr = untagged_addr(far);
>> }
>> +
>> + if (arm64_process_kernel_sea(siaddr, esr, regs, inf->sig,
>> inf->code))
>> + return 0;
>> +
>
> Rename arm64_process_kernel_sea() to arm64_do_kernel_sea()
>
> if (!arm64_do_kernel_sea())
>
> arm64_notify_die();
>
Agreed, will do next version.
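
Something like this for v2, folding in the set_thread_esr() suggestion as
well (untested sketch):

	static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr,
					struct pt_regs *regs, int sig, int code)
	{
		if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC))
			return false;

		if (user_mode(regs) || !current->mm)
			return false;

		if (apei_claim_sea(regs) < 0)
			return false;

		if (!fixup_exception_mc(regs))
			return false;

		set_thread_esr(0, esr);

		arm64_force_sig_fault(sig, code, addr,
			"Uncorrected hardware memory error in kernel-access\n");

		return true;
	}

and in do_sea():

	if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code))
		arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;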
>> arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr,
>> esr);
>> return 0;
>> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
>> index 546179418ffa..dd952aeecdc1 100644
>> --- a/include/linux/uaccess.h
>> +++ b/include/linux/uaccess.h
>> @@ -174,6 +174,14 @@ copy_mc_to_kernel(void *dst, const void *src,
>> size_t cnt)
>> }
>> #endif
>> +#ifndef copy_mc_to_user
>> +static inline unsigned long __must_check
>> +copy_mc_to_user(void *dst, const void *src, size_t cnt)
>> +{
> Add check_object_size(src, cnt, true); which could make
> HARDENED_USERCOPY work.
Agreed, will do next version.
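
So the generic fallback would become roughly the following (assuming the
usual check_object_size(ptr, n, to_user) signature):

	#ifndef copy_mc_to_user
	static inline unsigned long __must_check
	copy_mc_to_user(void *dst, const void *src, size_t cnt)
	{
		/* let HARDENED_USERCOPY validate the kernel source object */
		check_object_size(src, cnt, true);
		return raw_copy_to_user(dst, src, cnt);
	}
	#endif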
Thanks Kefeng,
Tong.
>> + return raw_copy_to_user(dst, src, cnt);
>> +}
>> +#endif
>> +
>> static __always_inline void pagefault_disabled_inc(void)
>> {
>> current->pagefault_disabled++;