2024-03-31 18:01:13

by Borislav Petkov

Subject: [PATCH v2 0/4] x86/alternatives: Do NOPs optimization on a temporary buffer

From: "Borislav Petkov (AMD)" <[email protected]>

Hi,

here's v2 of the set which does the NOPs optimization on a temporary
buffer. I had to retract v1 after a last-minute conflict and the
resulting breakage right before the last merge window.

This v2 has been extensively tested on 6.9-rc1.

Thx.

Changelog:
=========

v0:

here's a small set which sprang out of the realization that the NOPs
optimization in the alternatives code needs to happen on a temporary
buffer like the other alternatives operations do - not in-place, where
it can cause all kinds of fun.
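
In essence, the patching flow becomes the usual temporary-buffer dance
(a simplified sketch of what patch 1 does - see its diff for the real
thing):

	/* Copy the original instructions into a temporary buffer, */
	memcpy(insn_buff, instr, a->instrlen);
	/* optimize the NOPs there, */
	optimize_nops(instr, insn_buff, a->instrlen);
	/* and only then poke the result into the kernel text. */
	text_poke_early(instr, insn_buff, a->instrlen);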

The result is this, which makes the alternatives code simpler and is
a net win, size-wise:

1 file changed, 50 insertions(+), 72 deletions(-)

Constructive feedback is always welcome!

Borislav Petkov (AMD) (4):
x86/alternatives: Use a temporary buffer when optimizing NOPs
x86/alternatives: Get rid of __optimize_nops()
x86/alternatives: Optimize optimize_nops()
x86/alternatives: Sort local vars in apply_alternatives()

arch/x86/include/asm/text-patching.h | 2 +-
arch/x86/kernel/alternative.c | 133 ++++++++++++---------------
arch/x86/kernel/callthunks.c | 9 +-
3 files changed, 62 insertions(+), 82 deletions(-)

--
2.43.0


2024-03-31 18:01:31

by Borislav Petkov

Subject: [PATCH v2 2/4] x86/alternatives: Get rid of __optimize_nops()

From: "Borislav Petkov (AMD)" <[email protected]>

There's no need to carve out bits of the NOP optimization functionality
and look at JMP opcodes - simply do one more NOPs optimization pass
at the end of patching.

A lot simpler code.
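
To illustrate with a hypothetical 4-byte patch site padded out with
single-byte NOPs, the final pass merges the whole run into one long
NOP:

	before: 90 90 90 90		four ASM_NOP1s
	after:  0f 1f 40 00		one 4-byte NOP (x86_nops[4])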

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
---
arch/x86/kernel/alternative.c | 59 ++++++++++-------------------------
1 file changed, 16 insertions(+), 43 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ec94f1359c00..4b3378c71518 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -216,47 +216,12 @@ static int skip_nops(u8 *buf, int offset, int len)
return offset;
}

-/*
- * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
- * to the end of the NOP sequence into a single NOP.
- */
-static bool
-__optimize_nops(const u8 * const instr, u8 *buf, size_t len, struct insn *insn, int *next, int *prev, int *target)
-{
- int i = *next - insn->length;
-
- switch (insn->opcode.bytes[0]) {
- case JMP8_INSN_OPCODE:
- case JMP32_INSN_OPCODE:
- *prev = i;
- *target = *next + insn->immediate.value;
- return false;
- }
-
- if (insn_is_nop(insn)) {
- int nop = i;
-
- *next = skip_nops(buf, *next, len);
- if (*target && *next == *target)
- nop = *prev;
-
- add_nop(buf + nop, *next - nop);
- DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
- return true;
- }
-
- *target = 0;
- return false;
-}
-
/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
-static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
+static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
- int prev, target = 0;
-
for (int next, i = 0; i < len; i = next) {
struct insn insn;

@@ -265,7 +230,14 @@ static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *

next = i + insn.length;

- __optimize_nops(instr, buf, len, &insn, &next, &prev, &target);
+ if (insn_is_nop(&insn)) {
+ int nop = i;
+
+ next = skip_nops(buf, next, len);
+
+ add_nop(buf + nop, next - nop);
+ DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
+ }
}
}

@@ -339,10 +311,8 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
return (target < src || target > src + src_len);
}

-void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
- int prev, target = 0;
-
for (int next, i = 0; i < instrlen; i = next) {
struct insn insn;

@@ -351,9 +321,6 @@ void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl

next = i + insn.length;

- if (__optimize_nops(instr, buf, instrlen, &insn, &next, &prev, &target))
- continue;
-
switch (insn.opcode.bytes[0]) {
case 0x0f:
if (insn.opcode.bytes[1] < 0x80 ||
@@ -398,6 +365,12 @@ void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl
}
}

+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+{
+ __apply_relocation(buf, instr, instrlen, repl, repl_len);
+ optimize_nops(instr, buf, repl_len);
+}
+
/* Low-level backend functions usable from alternative code replacements */
DEFINE_ASM_FUNC(nop_func, "", .entry.text);
EXPORT_SYMBOL_GPL(nop_func);
--
2.43.0


2024-03-31 18:01:48

by Borislav Petkov

Subject: [PATCH v2 3/4] x86/alternatives: Optimize optimize_nops()

From: "Borislav Petkov (AMD)" <[email protected]>

Return early if the NOPs have already been optimized: a NOP which
extends all the way to the end of the patched range cannot be merged
with anything anymore, so rewriting it would only redo the same bytes.
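
For instance, assuming a 5-byte site which an earlier pass has already
collapsed into a single 5-byte NOP (a worked illustration, not from
the patch):

	buf: 0f 1f 44 00 00		x86_nops[5] covering the whole range
					i == 0, insn.length == 5 == len
					=> return early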

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
---
arch/x86/kernel/alternative.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4b3378c71518..67dd7c371d28 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -233,6 +233,10 @@ static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
if (insn_is_nop(&insn)) {
int nop = i;

+ /* Has the NOP already been optimized? */
+ if (i + insn.length == len)
+ return;
+
next = skip_nops(buf, next, len);

add_nop(buf + nop, next - nop);
--
2.43.0


2024-03-31 18:02:07

by Borislav Petkov

Subject: [PATCH v2 4/4] x86/alternatives: Sort local vars in apply_alternatives()

From: "Borislav Petkov (AMD)" <[email protected]>

Sort them in a reverse x-mas tree, i.e., longest declaration line
first, shortest last.

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
---
arch/x86/kernel/alternative.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 67dd7c371d28..7555c15b7183 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -445,9 +445,9 @@ static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
- struct alt_instr *a;
- u8 *instr, *replacement;
u8 insn_buff[MAX_PATCH_LEN];
+ u8 *instr, *replacement;
+ struct alt_instr *a;

DPRINTK(ALT, "alt table %px, -> %px", start, end);

--
2.43.0


2024-03-31 18:07:20

by Borislav Petkov

Subject: [PATCH v2 1/4] x86/alternatives: Use a temporary buffer when optimizing NOPs

From: "Borislav Petkov (AMD)" <[email protected]>

Instead of optimizing NOPs in-place, use a temporary buffer like the
usual alternatives patching flow does. This obviates the need to grab
locks when patching, see

6778977590da ("x86/alternatives: Disable interrupts and sync when optimizing NOPs in place")

While at it, add nomenclature definitions clarifying and simplifying the
naming of function-local variables in the alternatives code.
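
The apply_relocation() signature changes accordingly: callers now pass
the temporary buffer, the address and length of the original
instructions, and the replacement opcodes and their length. E.g., the
callthunks callers become (taken from the diff below):

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);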

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
---
arch/x86/include/asm/text-patching.h | 2 +-
arch/x86/kernel/alternative.c | 84 +++++++++++++++-------------
arch/x86/kernel/callthunks.c | 9 +--
3 files changed, 49 insertions(+), 46 deletions(-)

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 345aafbc1964..6259f1937fe7 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -15,7 +15,7 @@

extern void text_poke_early(void *addr, const void *opcode, size_t len);

-extern void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);
+extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);

/*
* Clear and restore the kernel write-protection flag on the local CPU.
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 45a280f2161c..ec94f1359c00 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -124,6 +124,20 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
#endif
};

+/*
+ * Nomenclature for variable names to simplify and clarify this code and ease
+ * any potential staring at it:
+ *
+ * @instr: source address of the original instructions in the kernel text as
+ * generated by the compiler.
+ *
+ * @buf: temporary buffer on which the patching operates. This buffer is
+ * eventually text-poked into the kernel image.
+ *
+ * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
+ * in the .altinstr_replacement section.
+ */
+
/*
* Fill the buffer with a single effective instruction of size @len.
*
@@ -133,28 +147,28 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
* each single-byte NOPs). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
* *jump* over instead of executing long and daft NOPs.
*/
-static void add_nop(u8 *instr, unsigned int len)
+static void add_nop(u8 *buf, unsigned int len)
{
- u8 *target = instr + len;
+ u8 *target = buf + len;

if (!len)
return;

if (len <= ASM_NOP_MAX) {
- memcpy(instr, x86_nops[len], len);
+ memcpy(buf, x86_nops[len], len);
return;
}

if (len < 128) {
- __text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
- instr += JMP8_INSN_SIZE;
+ __text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
+ buf += JMP8_INSN_SIZE;
} else {
- __text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
- instr += JMP32_INSN_SIZE;
+ __text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
+ buf += JMP32_INSN_SIZE;
}

- for (;instr < target; instr++)
- *instr = INT3_INSN_OPCODE;
+ for (;buf < target; buf++)
+ *buf = INT3_INSN_OPCODE;
}

extern s32 __retpoline_sites[], __retpoline_sites_end[];
@@ -187,12 +201,12 @@ static bool insn_is_nop(struct insn *insn)
* Find the offset of the first non-NOP instruction starting at @offset
* but no further than @len.
*/
-static int skip_nops(u8 *instr, int offset, int len)
+static int skip_nops(u8 *buf, int offset, int len)
{
struct insn insn;

for (; offset < len; offset += insn.length) {
- if (insn_decode_kernel(&insn, &instr[offset]))
+ if (insn_decode_kernel(&insn, &buf[offset]))
break;

if (!insn_is_nop(&insn))
@@ -207,7 +221,7 @@ static int skip_nops(u8 *instr, int offset, int len)
* to the end of the NOP sequence into a single NOP.
*/
static bool
-__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
+__optimize_nops(const u8 * const instr, u8 *buf, size_t len, struct insn *insn, int *next, int *prev, int *target)
{
int i = *next - insn->length;

@@ -222,12 +236,12 @@ __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev,
if (insn_is_nop(insn)) {
int nop = i;

- *next = skip_nops(instr, *next, len);
+ *next = skip_nops(buf, *next, len);
if (*target && *next == *target)
nop = *prev;

- add_nop(instr + nop, *next - nop);
- DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
+ add_nop(buf + nop, *next - nop);
+ DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
return true;
}

@@ -239,32 +253,22 @@ __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev,
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
-static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
int prev, target = 0;

for (int next, i = 0; i < len; i = next) {
struct insn insn;

- if (insn_decode_kernel(&insn, &instr[i]))
+ if (insn_decode_kernel(&insn, &buf[i]))
return;

next = i + insn.length;

- __optimize_nops(instr, len, &insn, &next, &prev, &target);
+ __optimize_nops(instr, buf, len, &insn, &next, &prev, &target);
}
}

-static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- optimize_nops(instr, len);
- sync_core();
- local_irq_restore(flags);
-}
-
/*
* In this context, "source" is where the instructions are placed in the
* section .altinstr_replacement, for example during kernel build by the
@@ -335,11 +339,11 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
return (target < src || target > src + src_len);
}

-void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
int prev, target = 0;

- for (int next, i = 0; i < len; i = next) {
+ for (int next, i = 0; i < instrlen; i = next) {
struct insn insn;

if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
@@ -347,7 +351,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)

next = i + insn.length;

- if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
+ if (__optimize_nops(instr, buf, instrlen, &insn, &next, &prev, &target))
continue;

switch (insn.opcode.bytes[0]) {
@@ -361,10 +365,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
case JMP8_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case CALL_INSN_OPCODE:
- if (need_reloc(next + insn.immediate.value, src, src_len)) {
+ if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
apply_reloc(insn.immediate.nbytes,
buf + i + insn_offset_immediate(&insn),
- src - dest);
+ repl - instr);
}

/*
@@ -372,7 +376,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
*/
if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
s32 imm = insn.immediate.value;
- imm += src - dest;
+ imm += repl - instr;
imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
if ((imm >> 31) == (imm >> 7)) {
buf[i+0] = JMP8_INSN_OPCODE;
@@ -385,10 +389,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
}

if (insn_rip_relative(&insn)) {
- if (need_reloc(next + insn.displacement.value, src, src_len)) {
+ if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
apply_reloc(insn.displacement.nbytes,
buf + i + insn_offset_displacement(&insn),
- src - dest);
+ repl - instr);
}
}
}
@@ -504,7 +508,9 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
- optimize_nops_inplace(instr, a->instrlen);
+ memcpy(insn_buff, instr, a->instrlen);
+ optimize_nops(instr, insn_buff, a->instrlen);
+ text_poke_early(instr, insn_buff, a->instrlen);
continue;
}

@@ -526,7 +532,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;

- apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
+ apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);

DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -761,7 +767,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)

len = patch_retpoline(addr, &insn, bytes);
if (len == insn.length) {
- optimize_nops(bytes, len);
+ optimize_nops(addr, bytes, len);
DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
text_poke_early(addr, bytes, len);
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 30335182b6b0..771d95484453 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -185,8 +185,7 @@ static void *patch_dest(void *dest, bool direct)
u8 *pad = dest - tsize;

memcpy(insn_buff, skl_call_thunk_template, tsize);
- apply_relocation(insn_buff, tsize, pad,
- skl_call_thunk_template, tsize);
+ apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

/* Already patched? */
if (!bcmp(pad, insn_buff, tsize))
@@ -308,8 +307,7 @@ static bool is_callthunk(void *addr)
pad = (void *)(dest - tmpl_size);

memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, tmpl_size, pad,
- skl_call_thunk_template, tmpl_size);
+ apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

return !bcmp(pad, insn_buff, tmpl_size);
}
@@ -327,8 +325,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
return 0;

memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
- apply_relocation(insn_buff, tmpl_size, *pprog,
- skl_call_thunk_template, tmpl_size);
+ apply_relocation(insn_buff, *pprog, tmpl_size, skl_call_thunk_template, tmpl_size);

memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size;
--
2.43.0