2022-02-24 16:33:55

by Peter Zijlstra

Subject: [PATCH v2 21/39] x86/ibt: Annotate text references

Annotate away some of the generic code references. These are cases
where the address of a symbol is taken for exception handling or used
as a return address (e.g. context switch).
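
For reference, ANNOTATE_NOENDBR works by recording the annotated
address in a .discard.noendbr section that objtool consumes when
validating indirect branch targets. A minimal sketch of the asm-side
macro (abbreviated; the exact definition lives in
include/linux/objtool.h and may differ in detail from this series):

	.macro ANNOTATE_NOENDBR		// sketch, abbreviated
	.Lhere_\@:
		.pushsection .discard.noendbr
		.quad	.Lhere_\@	// address objtool will skip
		.popsection
	.endm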

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/x86/entry/entry_64.S | 9 +++++++++
arch/x86/entry/entry_64_compat.S | 1 +
arch/x86/kernel/alternative.c | 4 +++-
arch/x86/kernel/head_64.S | 4 ++++
arch/x86/kernel/kprobes/core.c | 1 +
arch/x86/kernel/relocate_kernel_64.S | 2 ++
arch/x86/lib/error-inject.c | 2 ++
arch/x86/lib/retpoline.S | 2 ++
8 files changed, 24 insertions(+), 1 deletion(-)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -277,6 +277,7 @@ SYM_FUNC_END(__switch_to_asm)
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR // copy_thread
movq %rax, %rdi
call schedule_tail /* rdi: 'prev' task parameter */

@@ -563,12 +564,14 @@ SYM_CODE_END(\asmsym)
.align 16
.globl __irqentry_text_start
__irqentry_text_start:
+ ANNOTATE_NOENDBR // unwinders

#include <asm/idtentry.h>

.align 16
.globl __irqentry_text_end
__irqentry_text_end:
+ ANNOTATE_NOENDBR

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -646,6 +651,7 @@ SYM_INNER_LABEL_ALIGN(native_iret, SYM_L
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR // exc_double_fault
/*
* This may fault. Non-paranoid faults on return to userspace are
* handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -740,6 +746,7 @@ SYM_FUNC_START(asm_load_gs_index)
FRAME_BEGIN
swapgs
.Lgs_change:
+ ANNOTATE_NOENDBR // error_entry
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
swapgs
@@ -1317,6 +1324,7 @@ SYM_CODE_START(asm_exc_nmi)
#endif

repeat_nmi:
+ ANNOTATE_NOENDBR // this code
/*
* If there was a nested NMI, the first NMI's iret will return
* here. But NMIs are still enabled and we can take another
@@ -1345,6 +1353,7 @@ SYM_CODE_START(asm_exc_nmi)
.endr
subq $(5*8), %rsp
end_repeat_nmi:
+ ANNOTATE_NOENDBR // this code

/*
* Everything below this point can be preempted by a nested NMI.
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -148,6 +148,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_af
popfq
jmp .Lsysenter_flags_fixed
SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR // is_sysenter_singlestep
SYM_CODE_END(entry_SYSENTER_compat)

/*
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -713,6 +713,7 @@ asm (
" .pushsection .init.text, \"ax\", @progbits\n"
" .type int3_magic, @function\n"
"int3_magic:\n"
+ ANNOTATE_NOENDBR
" movl $1, (%" _ASM_ARG1 ")\n"
ASM_RET
" .size int3_magic, .-int3_magic\n"
@@ -757,7 +758,8 @@ static void __init int3_selftest(void)
* then trigger the INT3, padded with NOPs to match a CALL instruction
* length.
*/
- asm volatile ("1: int3; nop; nop; nop; nop\n\t"
+ asm volatile (ANNOTATE_NOENDBR
+ "1: int3; nop; nop; nop; nop\n\t"
".pushsection .init.data,\"aw\"\n\t"
".align " __ASM_SEL(4, 8) "\n\t"
".type int3_selftest_ip, @object\n\t"
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -99,6 +99,7 @@ SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR
/*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded a mapped page table.
@@ -127,6 +128,7 @@ SYM_CODE_START(secondary_startup_64)
*/
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR

/*
* Retrieve the modifier (SME encryption mask if SME is active) to be
@@ -192,6 +194,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_
jmp *%rax
1:
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR // above

/*
* We must switch to a new descriptor in kernel space for the GDT
@@ -299,6 +302,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_
pushq %rax # target address in negative space
lretq
.Lafter_lret:
+ ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1023,6 +1023,7 @@ asm(
".type __kretprobe_trampoline, @function\n"
"__kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
+ ANNOTATE_NOENDBR
/* Push a fake return address to tell the unwinder it's a kretprobe. */
" pushq $__kretprobe_trampoline\n"
UNWIND_HINT_FUNC
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -42,6 +42,7 @@
.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR
/*
* %rdi indirection_page
* %rsi page_list
@@ -215,6 +216,7 @@ SYM_CODE_END(identity_mapped)

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR // RET target, above
movq RSP(%r8), %rsp
movq CR4(%r8), %rax
movq %rax, %cr4
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <linux/error-injection.h>
#include <linux/kprobes.h>
+#include <linux/objtool.h>

asmlinkage void just_return_func(void);

@@ -11,6 +12,7 @@ asm(
".type just_return_func, @function\n"
".globl just_return_func\n"
"just_return_func:\n"
+ ANNOTATE_NOENDBR
ASM_RET
".size just_return_func, .-just_return_func\n"
);
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -12,6 +12,8 @@

.section .text.__x86.indirect_thunk

+ ANNOTATE_NOENDBR // apply_retpolines
+
.macro RETPOLINE reg
ANNOTATE_INTRA_FUNCTION_CALL
call .Ldo_rop_\@



2022-02-25 01:26:17

by Josh Poimboeuf

Subject: Re: [PATCH v2 21/39] x86/ibt: Annotate text references

On Thu, Feb 24, 2022 at 03:51:59PM +0100, Peter Zijlstra wrote:
> @@ -563,12 +564,14 @@ SYM_CODE_END(\asmsym)
> .align 16
> .globl __irqentry_text_start
> __irqentry_text_start:
> + ANNOTATE_NOENDBR // unwinders

But the instruction here (first idt entry) actually does have an
endbr64...

Also I'm wondering if it would make sense to create an
'idt_entry_<vector>' symbol for each entry so objtool knows to validate
their ENDBRs.
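
Hand-waving the symbol-name generation (untested sketch; the
'idt_entry_' prefix and 'gen_idt_entry' helper are made-up names),
maybe something like:

.altmacro
.macro gen_idt_entry vector		// hypothetical helper
SYM_INNER_LABEL(idt_entry_\vector, SYM_L_GLOBAL)	// hypothetical name
	ENDBR
	.byte	0x6a, \vector
	jmp	asm_common_interrupt
.endm

    vector = FIRST_EXTERNAL_VECTOR
    .rept NR_EXTERNAL_VECTORS
	gen_idt_entry %vector		// %vector: altmacro numeric expansion
	vector = vector + 1
    .endr
.noaltmacro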

> +++ b/arch/x86/lib/retpoline.S
> @@ -12,6 +12,8 @@
>
> .section .text.__x86.indirect_thunk
>
> + ANNOTATE_NOENDBR // apply_retpolines

This should probably go after __x86_indirect_thunk_array?

--
Josh

2022-02-25 18:15:36

by Peter Zijlstra

Subject: Re: [PATCH v2 21/39] x86/ibt: Annotate text references

On Thu, Feb 24, 2022 at 04:47:16PM -0800, Josh Poimboeuf wrote:
> On Thu, Feb 24, 2022 at 03:51:59PM +0100, Peter Zijlstra wrote:
> > @@ -563,12 +564,14 @@ SYM_CODE_END(\asmsym)
> > .align 16
> > .globl __irqentry_text_start
> > __irqentry_text_start:
> > + ANNOTATE_NOENDBR // unwinders
>
> But the instruction here (first idt entry) actually does have an
> endbr64...
>
> Also I'm wondering if it would make sense to create an
> 'idt_entry_<vector>' symbol for each entry so objtool knows to validate
> their ENDBRs.

I think we're good on that front since irq_entries_start and
spurious_entries_start capture the first entry, and the remaining
entries are generated by .rept at fixed offsets from it.
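
For reference, the construct in question looks roughly like this
(abbreviated from the asm side of arch/x86/include/asm/idtentry.h;
alignment/padding details are approximate and vary across versions):

	.align IDT_ALIGN
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept NR_EXTERNAL_VECTORS
	UNWIND_HINT_IRET_REGS
0 :
	ENDBR
	.byte	0x6a, vector		/* push imm8: vector number */
	jmp	asm_common_interrupt
	/* pad each entry to IDT_ALIGN bytes */
	.fill 0b + IDT_ALIGN - ., 1, 0xcc
	vector = vector+1
    .endr
SYM_CODE_END(irq_entries_start)

i.e. the symbol (and the ENDBR of the first entry) anchors the block,
and objtool can find the rest at fixed offsets from it.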

2022-02-26 00:17:52

by Peter Zijlstra

Subject: Re: [PATCH v2 21/39] x86/ibt: Annotate text references

On Thu, Feb 24, 2022 at 04:47:16PM -0800, Josh Poimboeuf wrote:
> On Thu, Feb 24, 2022 at 03:51:59PM +0100, Peter Zijlstra wrote:
> > @@ -563,12 +564,14 @@ SYM_CODE_END(\asmsym)
> > .align 16
> > .globl __irqentry_text_start
> > __irqentry_text_start:
> > + ANNOTATE_NOENDBR // unwinders
>
> But the instruction here (first idt entry) actually does have an
> endbr64...
>

--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2023,6 +2023,9 @@ static int read_noendbr_hints(struct obj
return -1;
}

+ if (insn->type == INSN_ENDBR)
+ WARN_FUNC("ANNOTATE_NOENDBR on ENDBR", insn->sec, insn->offset);
+
insn->noendbr = 1;
}


vmlinux.o: warning: objtool: .entry.text+0x160: ANNOTATE_NOENDBR on ENDBR
vmlinux.o: warning: objtool: xen_pvh_init()+0x0: ANNOTATE_NOENDBR on ENDBR

right you are... /me goes fix