2020-04-25 11:11:33

by Josh Poimboeuf

Subject: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

v3:
- klp: split klp_write_relocations() into object/section specific
functions [joe]
- s390: fix plt/got writes [joe]
- s390: remove text_mutex usage [mbenes]
- x86: do text_poke_sync() before releasing text_mutex [peterz]
- split x86 text_mutex changes into separate patch [mbenes]

v2:
- add vmlinux.ko check [peterz]
- remove 'klp_object' forward declaration [mbenes]
- use text_mutex [jeyu]
- fix documentation TOC [jeyu]
- fix s390 issues [mbenes]
- upstream kpatch-build now supports this
(though it's only enabled for Linux >= 5.8)

These patches add simplifications and improvements for some issues Peter
found six months ago, as part of his non-writable text code (W^X)
cleanups.

Highlights:

- Remove the livepatch arch-specific .klp.arch sections, which were used
to do paravirt patching and alternatives patching for livepatch
replacement code.

- Add support for jump labels in patched code.

- Remove the last module_disable_ro() usage.

For more background, see this thread:

https://lkml.kernel.org/r/20191021135312.jbbxsuipxldocdjk@treble

This has been tested with kpatch-build integration tests and klp-convert
selftests.

Josh Poimboeuf (7):
livepatch: Disallow vmlinux.ko
livepatch: Apply vmlinux-specific KLP relocations early
livepatch: Prevent module-specific KLP rela sections from referencing
vmlinux symbols
s390: Change s390_kernel_write() return type to match memcpy()
livepatch: Remove module_disable_ro() usage
module: Remove module_disable_ro()
x86/module: Use text_mutex in apply_relocate_add()

Peter Zijlstra (3):
livepatch: Remove .klp.arch
s390/module: Use s390_kernel_write() for late relocations
x86/module: Use text_poke() for late relocations

Documentation/livepatch/module-elf-format.rst | 15 +-
arch/s390/include/asm/uaccess.h | 2 +-
arch/s390/kernel/module.c | 147 +++++++++------
arch/s390/mm/maccess.c | 9 +-
arch/um/kernel/um_arch.c | 16 ++
arch/x86/kernel/Makefile | 1 -
arch/x86/kernel/livepatch.c | 53 ------
arch/x86/kernel/module.c | 43 ++++-
include/linux/livepatch.h | 17 +-
include/linux/module.h | 2 -
kernel/livepatch/core.c | 177 +++++++++++-------
kernel/module.c | 23 +--
12 files changed, 277 insertions(+), 228 deletions(-)
delete mode 100644 arch/x86/kernel/livepatch.c

--
2.21.1


2020-04-25 11:11:33

by Josh Poimboeuf

Subject: [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations

From: Peter Zijlstra <[email protected]>

Because of late module patching, a livepatch module needs to be able to
apply some of its relocations well after it has been loaded. Instead of
playing games with module_{dis,en}able_ro(), use existing text poking
mechanisms to apply relocations after module loading.

So far only x86, s390 and Power have HAVE_LIVEPATCH but only the first
two also have STRICT_MODULE_RWX.

This will allow removal of the last module_disable_ro() usage in
livepatch. The ultimate goal is to completely disallow making
executable mappings writable.
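
In short, the pattern (a sketch distilled from the diff below, not the
verbatim kernel code):

    /*
     * Early relocations (module still MODULE_STATE_UNFORMED) can use a
     * plain memcpy() because the module text is not yet mapped
     * read-only.  Late (livepatch) relocations must go through the
     * arch's text-poking primitive instead: s390_kernel_write() here,
     * text_poke() on x86.
     */
    void *(*write)(void *, const void *, size_t) = memcpy;

    if (me->state != MODULE_STATE_UNFORMED)
        write = s390_kernel_write;

    write(dest, &tmp, sizeof(tmp));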

[ jpoimboe: Split up patches. Use mod state to determine whether
memcpy() can be used. Test and add fixes. ]

Cc: [email protected]
Cc: Heiko Carstens [email protected]
Cc: Gerald Schaefer <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Suggested-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Josh Poimboeuf <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/s390/kernel/module.c | 147 +++++++++++++++++++++++---------------
1 file changed, 88 insertions(+), 59 deletions(-)

diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index ba8f19bb438b..4055f1c49814 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -19,6 +19,7 @@
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
@@ -174,10 +175,12 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
}

static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
- int sign, int bits, int shift)
+ int sign, int bits, int shift,
+ void *(*write)(void *dest, const void *src, size_t len))
{
unsigned long umax;
long min, max;
+ void *dest = (void *)loc;

if (val & ((1UL << shift) - 1))
return -ENOEXEC;
@@ -194,26 +197,33 @@ static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
return -ENOEXEC;
}

- if (bits == 8)
- *(unsigned char *) loc = val;
- else if (bits == 12)
- *(unsigned short *) loc = (val & 0xfff) |
+ if (bits == 8) {
+ unsigned char tmp = val;
+ write(dest, &tmp, 1);
+ } else if (bits == 12) {
+ unsigned short tmp = (val & 0xfff) |
(*(unsigned short *) loc & 0xf000);
- else if (bits == 16)
- *(unsigned short *) loc = val;
- else if (bits == 20)
- *(unsigned int *) loc = (val & 0xfff) << 16 |
- (val & 0xff000) >> 4 |
- (*(unsigned int *) loc & 0xf00000ff);
- else if (bits == 32)
- *(unsigned int *) loc = val;
- else if (bits == 64)
- *(unsigned long *) loc = val;
+ write(dest, &tmp, 2);
+ } else if (bits == 16) {
+ unsigned short tmp = val;
+ write(dest, &tmp, 2);
+ } else if (bits == 20) {
+ unsigned int tmp = (val & 0xfff) << 16 |
+ (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
+ write(dest, &tmp, 4);
+ } else if (bits == 32) {
+ unsigned int tmp = val;
+ write(dest, &tmp, 4);
+ } else if (bits == 64) {
+ unsigned long tmp = val;
+ write(dest, &tmp, 8);
+ }
return 0;
}

static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- const char *strtab, struct module *me)
+ const char *strtab, struct module *me,
+ void *(*write)(void *dest, const void *src, size_t len))
{
struct mod_arch_syminfo *info;
Elf_Addr loc, val;
@@ -241,17 +251,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_64: /* Direct 64 bit. */
val += rela->r_addend;
if (r_type == R_390_8)
- rc = apply_rela_bits(loc, val, 0, 8, 0);
+ rc = apply_rela_bits(loc, val, 0, 8, 0, write);
else if (r_type == R_390_12)
- rc = apply_rela_bits(loc, val, 0, 12, 0);
+ rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_20)
- rc = apply_rela_bits(loc, val, 1, 20, 0);
+ rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_PC16: /* PC relative 16 bit. */
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
@@ -260,15 +270,15 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PC64: /* PC relative 64 bit. */
val += rela->r_addend - loc;
if (r_type == R_390_PC16)
- rc = apply_rela_bits(loc, val, 1, 16, 0);
+ rc = apply_rela_bits(loc, val, 1, 16, 0, write);
else if (r_type == R_390_PC16DBL)
- rc = apply_rela_bits(loc, val, 1, 16, 1);
+ rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PC32DBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PC32)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
+ rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_PC64)
- rc = apply_rela_bits(loc, val, 1, 64, 0);
+ rc = apply_rela_bits(loc, val, 1, 64, 0, write);
break;
case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */
@@ -283,33 +293,33 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_initialized == 0) {
- Elf_Addr *gotent;
+ Elf_Addr *gotent = me->core_layout.base +
+ me->arch.got_offset +
+ info->got_offset;

- gotent = me->core_layout.base + me->arch.got_offset +
- info->got_offset;
- *gotent = val;
+ write(gotent, &val, sizeof(*gotent));
info->got_initialized = 1;
}
val = info->got_offset + rela->r_addend;
if (r_type == R_390_GOT12 ||
r_type == R_390_GOTPLT12)
- rc = apply_rela_bits(loc, val, 0, 12, 0);
+ rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_GOT16 ||
r_type == R_390_GOTPLT16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOT20 ||
r_type == R_390_GOTPLT20)
- rc = apply_rela_bits(loc, val, 1, 20, 0);
+ rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_GOT32 ||
r_type == R_390_GOTPLT32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
val += (Elf_Addr) me->core_layout.base - loc;
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
}
break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
@@ -320,25 +330,29 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
- unsigned int *ip;
- ip = me->core_layout.base + me->arch.plt_offset +
- info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0 */
- ip[1] = 0x100a0004; /* lg 1,10(1) */
+ unsigned int insn[5];
+ unsigned int *ip = me->core_layout.base +
+ me->arch.plt_offset +
+ info->plt_offset;
+
+ insn[0] = 0x0d10e310; /* basr 1,0 */
+ insn[1] = 0x100a0004; /* lg 1,10(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
unsigned int *ij;
ij = me->core_layout.base +
me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
- ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ insn[2] = 0xa7f40000 + /* j __jump_r1 */
(unsigned int)(u16)
(((unsigned long) ij - 8 -
(unsigned long) ip) / 2);
} else {
- ip[2] = 0x07f10000; /* br %r1 */
+ insn[2] = 0x07f10000; /* br %r1 */
}
- ip[3] = (unsigned int) (val >> 32);
- ip[4] = (unsigned int) val;
+ insn[3] = (unsigned int) (val >> 32);
+ insn[4] = (unsigned int) val;
+
+ write(ip, insn, sizeof(insn));
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
@@ -357,17 +371,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val += rela->r_addend - loc;
}
if (r_type == R_390_PLT16DBL)
- rc = apply_rela_bits(loc, val, 1, 16, 1);
+ rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PLTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_PLT32DBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PLT32 ||
r_type == R_390_PLTOFF32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_PLT64 ||
r_type == R_390_PLTOFF64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTOFF16: /* 16 bit offset to GOT. */
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
@@ -375,20 +389,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val = val + rela->r_addend -
((Elf_Addr) me->core_layout.base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOTOFF32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOTOFF64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
+ rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_GOTPCDBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
break;
case R_390_COPY:
case R_390_GLOB_DAT: /* Create GOT entry. */
@@ -412,9 +426,10 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
return 0;
}

-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
- struct module *me)
+ struct module *me,
+ void *(*write)(void *dest, const void *src, size_t len))
{
Elf_Addr base;
Elf_Sym *symtab;
@@ -430,13 +445,27 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

for (i = 0; i < n; i++, rela++) {
- rc = apply_rela(rela, base, symtab, strtab, me);
+ rc = apply_rela(rela, base, symtab, strtab, me, write);
if (rc)
return rc;
}
return 0;
}

+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ bool early = me->state == MODULE_STATE_UNFORMED;
+ void *(*write)(void *, const void *, size_t) = memcpy;
+
+ if (!early)
+ write = s390_kernel_write;
+
+ return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+ write);
+}
+
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
--
2.21.1

2020-04-25 11:12:04

by Josh Poimboeuf

Subject: [PATCH v3 02/10] livepatch: Apply vmlinux-specific KLP relocations early

KLP relocations are livepatch-specific relocations which are applied to
a KLP module's text or data. They exist for two reasons:

1) Unexported symbols: replacement functions often need to access
unexported symbols (e.g. static functions), which "normal"
relocations don't allow.

2) Late module patching: this is the ability for a KLP module to
bypass normal module dependencies, such that the KLP module can be
loaded *before* a to-be-patched module. This means that
relocations which need to access symbols in the to-be-patched
module might need to be applied to the KLP module well after it has
been loaded.
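
For reference, the Elf naming formats involved (per
Documentation/livepatch/module-elf-format.rst; the concrete names below
are hypothetical examples):

    .klp.rela.{objname}.{secname}          e.g. .klp.rela.vmlinux..text
    .klp.sym.{objname}.{symname},{sympos}  e.g. .klp.sym.vmlinux.printk,0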

Non-late-patched KLP relocations are applied from the KLP module's init
function. That usually works fine, unless the patched code wants to use
alternatives, paravirt patching, jump tables, or some other special
section which needs relocations. Then we run into ordering issues and
crashes.

In order for those special sections to work properly, the KLP
relocations should be applied *before* the special section init code
runs, such as apply_paravirt(), apply_alternatives(), or
jump_label_apply_nops().

You might think the obvious solution would be to move the KLP relocation
initialization earlier, but it's not necessarily that simple. The
problem is the above-mentioned late module patching, for which KLP
relocations can get applied well after the KLP module is loaded.

To "fix" this issue in the past, we created .klp.arch sections:

.klp.arch.{module}..altinstructions
.klp.arch.{module}..parainstructions

Those sections allow KLP late module patching code to call
apply_paravirt() and apply_alternatives() after the module-specific KLP
relocations (.klp.rela.{module}.{section}) have been applied.

But that has a lot of drawbacks, including code complexity, the need for
arch-specific code, and the (per-arch) danger that we missed some
special section -- for example the __jump_table section which is used
for jump labels.

It turns out there's a simpler and more functional approach. There are
two kinds of KLP relocation sections:

1) vmlinux-specific KLP relocation sections

.klp.rela.vmlinux.{sec}

These are relocations (applied to the KLP module) which reference
unexported vmlinux symbols.

2) module-specific KLP relocation sections

.klp.rela.{module}.{sec}:

These are relocations (applied to the KLP module) which reference
unexported or exported module symbols.

Up until now, these have been treated the same. However, they're
inherently different.

Because of late module patching, module-specific KLP relocations can be
applied very late, thus they can create the ordering headaches described
above.

But vmlinux-specific KLP relocations don't have that problem. There's
nothing to prevent them from being applied earlier. So apply them at
the same time as normal relocations, when the KLP module is being
loaded.
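
Concretely, the module loader's relocation pass ends up doing roughly
the following (a sketch mirroring the kernel/module.c hunk at the end
of this patch):

    /*
     * apply_relocations(): hand KLP relocation sections to livepatch
     * instead of skipping them; everything else takes the usual
     * SHT_REL/SHT_RELA paths.  A NULL objname means "vmlinux", so only
     * vmlinux-specific KLP sections are applied at this point.
     */
    if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
        err = klp_apply_section_relocs(mod, info->sechdrs,
                                       info->secstrings, info->strtab,
                                       info->index.sym, i, NULL);
    else if (info->sechdrs[i].sh_type == SHT_RELA)
        err = apply_relocate_add(info->sechdrs, info->strtab,
                                 info->index.sym, i, mod);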

This means that for vmlinux-specific KLP relocations, we no longer have
any ordering issues. vmlinux-referencing jump labels, alternatives, and
paravirt patching will work automatically, without the need for the
.klp.arch hacks.

All that said, for module-specific KLP relocations, the ordering
problems still exist and we *do* still need .klp.arch. Or do we? Stay
tuned.

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Josh Poimboeuf <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
---
include/linux/livepatch.h | 14 ++++
kernel/livepatch/core.c | 134 +++++++++++++++++++++++---------------
kernel/module.c | 10 +--
3 files changed, 103 insertions(+), 55 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index e894e74905f3..c4302e9a5905 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -234,6 +234,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);

+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -242,6 +247,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 40cfac8156fd..86a45d8a593b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -191,12 +191,12 @@ static int klp_find_object_symbol(const char *objname, const char *name,
return -EINVAL;
}

-static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
+static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
+ unsigned int symndx, Elf_Shdr *relasec)
{
int i, cnt, vmlinux, ret;
char objname[MODULE_NAME_LEN];
char symname[KSYM_NAME_LEN];
- char *strtab = pmod->core_kallsyms.strtab;
Elf_Rela *relas;
Elf_Sym *sym;
unsigned long sympos, addr;
@@ -216,7 +216,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
relas = (Elf_Rela *) relasec->sh_addr;
/* For each rela in this klp relocation section */
for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
- sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+ sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
if (sym->st_shndx != SHN_LIVEPATCH) {
pr_err("symbol %s is not marked as a livepatch symbol\n",
strtab + sym->st_name);
@@ -246,54 +246,59 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
return 0;
}

-static int klp_write_object_relocations(struct module *pmod,
- struct klp_object *obj)
+/*
+ * At a high-level, there are two types of klp relocation sections: those which
+ * reference symbols which live in vmlinux; and those which reference symbols
+ * which live in other modules. This function is called for both types:
+ *
+ * 1) When a klp module itself loads, the module code calls this function to
+ * write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
+ * These relocations are written to the klp module text to allow the patched
+ * code/data to reference unexported vmlinux symbols. They're written as
+ * early as possible to ensure that other module init code (e.g.,
+ * jump_label_apply_nops) can access any unexported vmlinux symbols which
+ * might be referenced by the klp module's special sections.
+ *
+ * 2) When a to-be-patched module loads -- or is already loaded when a
+ * corresponding klp module loads -- klp code calls this function to write
+ * module-specific klp relocations (.klp.rela.{module}.* sections). These
+ * are written to the klp module text to allow the patched code/data to
+ * reference symbols which live in the to-be-patched module or one of its
+ * module dependencies. Exported symbols are supported, in addition to
+ * unexported symbols, in order to enable late module patching, which allows
+ * the to-be-patched module to be loaded and patched sometime *after* the
+ * klp module is loaded.
+ */
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symndx, unsigned int secndx,
+ const char *objname)
{
- int i, cnt, ret = 0;
- const char *objname, *secname;
+ int cnt, ret;
char sec_objname[MODULE_NAME_LEN];
- Elf_Shdr *sec;
+ Elf_Shdr *sec = sechdrs + secndx;

- if (WARN_ON(!klp_is_object_loaded(obj)))
+ /*
+ * Format: .klp.rela.sec_objname.section_name
+ * See comment in klp_resolve_symbols() for an explanation
+ * of the selected field width value.
+ */
+ cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
+ sec_objname);
+ if (cnt != 1) {
+ pr_err("section %s has an incorrectly formatted name\n",
+ shstrtab + sec->sh_name);
return -EINVAL;
+ }

- objname = klp_is_module(obj) ? obj->name : "vmlinux";
-
- /* For each klp relocation section */
- for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
- sec = pmod->klp_info->sechdrs + i;
- secname = pmod->klp_info->secstrings + sec->sh_name;
- if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
- continue;
-
- /*
- * Format: .klp.rela.sec_objname.section_name
- * See comment in klp_resolve_symbols() for an explanation
- * of the selected field width value.
- */
- cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
- if (cnt != 1) {
- pr_err("section %s has an incorrectly formatted name\n",
- secname);
- ret = -EINVAL;
- break;
- }
-
- if (strcmp(objname, sec_objname))
- continue;
-
- ret = klp_resolve_symbols(sec, pmod);
- if (ret)
- break;
+ if (strcmp(objname ? objname : "vmlinux", sec_objname))
+ return 0;

- ret = apply_relocate_add(pmod->klp_info->sechdrs,
- pmod->core_kallsyms.strtab,
- pmod->klp_info->symndx, i, pmod);
- if (ret)
- break;
- }
+ ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec);
+ if (ret)
+ return ret;

- return ret;
+ return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
}

/*
@@ -730,6 +735,28 @@ void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
{
}

+int klp_apply_object_relocs(struct klp_patch *patch, struct klp_object *obj)
+{
+ int i, ret;
+ struct klp_modinfo *info = patch->mod->klp_info;
+
+ for (i = 1; i < info->hdr.e_shnum; i++) {
+ Elf_Shdr *sec = info->sechdrs + i;
+
+ if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+ continue;
+
+ ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
+ info->secstrings,
+ patch->mod->core_kallsyms.strtab,
+ info->symndx, i, obj->name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_object *obj)
@@ -738,18 +765,23 @@ static int klp_init_object_loaded(struct klp_patch *patch,
int ret;

mutex_lock(&text_mutex);
-
module_disable_ro(patch->mod);
- ret = klp_write_object_relocations(patch->mod, obj);
- if (ret) {
- module_enable_ro(patch->mod, true);
- mutex_unlock(&text_mutex);
- return ret;
+
+ if (klp_is_module(obj)) {
+ /*
+ * Only write module-specific relocations here
+ * (.klp.rela.{module}.*). vmlinux-specific relocations were
+ * written earlier during the initialization of the klp module
+ * itself.
+ */
+ ret = klp_apply_object_relocs(patch, obj);
+ if (ret)
+ return ret;
}

arch_klp_init_object_loaded(patch, obj);
- module_enable_ro(patch->mod, true);

+ module_enable_ro(patch->mod, true);
mutex_unlock(&text_mutex);

klp_for_each_func(obj, func) {
diff --git a/kernel/module.c b/kernel/module.c
index d626dc31b37f..96b2575329f4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2363,11 +2363,13 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;

- /* Livepatch relocation sections are applied by livepatch */
if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
- continue;
-
- if (info->sechdrs[i].sh_type == SHT_REL)
+ err = klp_apply_section_relocs(mod, info->sechdrs,
+ info->secstrings,
+ info->strtab,
+ info->index.sym, i,
+ NULL);
+ else if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod);
else if (info->sechdrs[i].sh_type == SHT_RELA)
--
2.21.1

2020-04-25 11:12:51

by Josh Poimboeuf

Subject: [PATCH v3 03/10] livepatch: Remove .klp.arch

From: Peter Zijlstra <[email protected]>

After the previous patch, vmlinux-specific KLP relocations are now
applied early during KLP module load. This means that .klp.arch
sections are no longer needed for *vmlinux-specific* KLP relocations.

One might think they're still needed for *module-specific* KLP
relocations. If a to-be-patched module is loaded *after* its
corresponding KLP module is loaded, any corresponding KLP relocations
will be delayed until the to-be-patched module is loaded. If any
special sections (.parainstructions, for example) rely on those
relocations, their initializations (apply_paravirt) need to be done
afterwards. Thus the apparent need for arch_klp_init_object_loaded()
and its corresponding .klp.arch sections -- it allows some of the
special section initializations to be done at a later time.

But... if you look closer, that dependency between the special sections
and the module-specific KLP relocations doesn't actually exist in
reality. Looking at the contents of the .altinstructions and
.parainstructions sections, there's not a realistic scenario in which a
KLP module's .altinstructions or .parainstructions section needs to
access a symbol in a to-be-patched module. It might need to access a
local symbol or even a vmlinux symbol; but not another module's symbol.
When a special section needs to reference a local or vmlinux symbol, a
normal rela can be used instead of a KLP rela.
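
To make that concrete, here is a hypothetical patched function using a
jump label whose static key lives in vmlinux (the key name is made up
for illustration):

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    /*
     * Assume: a static key defined in vmlinux.  The __jump_table entry
     * this generates references the key and the patch module's own
     * text.  Both are resolved when the patch module loads -- via a
     * normal rela, or via an early-applied .klp.rela.vmlinux entry if
     * the key is unexported.  No module-specific KLP rela, and hence
     * no .klp.arch section, is involved.
     */
    extern struct static_key_false example_key;

    void livepatched_function(void)
    {
        if (static_branch_unlikely(&example_key))
            pr_info("fast path enabled\n");
    }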

Since the special section initializations don't actually have any real
dependency on module-specific KLP relocations, .klp.arch and
arch_klp_init_object_loaded() no longer have a reason to exist. So
remove them.

As Peter said much more succinctly:

So the reason for .klp.arch was that .klp.rela.* stuff would overwrite
paravirt instructions. If that happens you're doing it wrong. Those
RELAs are core kernel, not module, and thus should've happened in
.rela.* sections at patch-module loading time.

Reverting this removes the two apply_{paravirt,alternatives}() calls
from the late patching path, and means we don't have to worry about
them when removing module_disable_ro().

[ jpoimboe: Rewrote patch description. Tweaked klp_init_object_loaded()
error path. ]

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Josh Poimboeuf <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
---
Documentation/livepatch/module-elf-format.rst | 15 +-----
arch/x86/kernel/Makefile | 1 -
arch/x86/kernel/livepatch.c | 53 -------------------
include/linux/livepatch.h | 3 --
kernel/livepatch/core.c | 22 +++-----
5 files changed, 10 insertions(+), 84 deletions(-)
delete mode 100644 arch/x86/kernel/livepatch.c

diff --git a/Documentation/livepatch/module-elf-format.rst b/Documentation/livepatch/module-elf-format.rst
index 2a591e6f8e6c..8c6b894c4661 100644
--- a/Documentation/livepatch/module-elf-format.rst
+++ b/Documentation/livepatch/module-elf-format.rst
@@ -14,8 +14,7 @@ This document outlines the Elf format requirements that livepatch modules must f
4. Livepatch symbols
4.1 A livepatch module's symbol table
4.2 Livepatch symbol format
- 5. Architecture-specific sections
- 6. Symbol table and Elf section access
+ 5. Symbol table and Elf section access

1. Background and motivation
============================
@@ -298,17 +297,7 @@ Examples:
Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
"OS" means OS-specific.

-5. Architecture-specific sections
-=================================
-Architectures may override arch_klp_init_object_loaded() to perform
-additional arch-specific tasks when a target module loads, such as applying
-arch-specific sections. On x86 for example, we must apply per-object
-.altinstructions and .parainstructions sections when a target module loads.
-These sections must be prefixed with ".klp.arch.$objname." so that they can
-be easily identified when iterating through a patch module's Elf sections
-(See arch/x86/kernel/livepatch.c for a complete example).
-
-6. Symbol table and Elf section access
+5. Symbol table and Elf section access
======================================
A livepatch module's symbol table is accessible through module->symtab.

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 92e1261ec4ec..e77261db2391 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -94,7 +94,6 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
deleted file mode 100644
index 6a68e41206e7..000000000000
--- a/arch/x86/kernel/livepatch.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * livepatch.c - x86-specific Kernel Live Patching Core
- */
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/livepatch.h>
-#include <asm/text-patching.h>
-
-/* Apply per-object alternatives. Based on x86 module_finalize() */
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj)
-{
- int cnt;
- struct klp_modinfo *info;
- Elf_Shdr *s, *alt = NULL, *para = NULL;
- void *aseg, *pseg;
- const char *objname;
- char sec_objname[MODULE_NAME_LEN];
- char secname[KSYM_NAME_LEN];
-
- info = patch->mod->klp_info;
- objname = obj->name ? obj->name : "vmlinux";
-
- /* See livepatch core code for BUILD_BUG_ON() explanation */
- BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
-
- for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) {
- /* Apply per-object .klp.arch sections */
- cnt = sscanf(info->secstrings + s->sh_name,
- ".klp.arch.%55[^.].%127s",
- sec_objname, secname);
- if (cnt != 2)
- continue;
- if (strcmp(sec_objname, objname))
- continue;
- if (!strcmp(".altinstructions", secname))
- alt = s;
- if (!strcmp(".parainstructions", secname))
- para = s;
- }
-
- if (alt) {
- aseg = (void *) alt->sh_addr;
- apply_alternatives(aseg, aseg + alt->sh_size);
- }
-
- if (para) {
- pseg = (void *) para->sh_addr;
- apply_paravirt(pseg, pseg + para->sh_size);
- }
-}
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index c4302e9a5905..2614247a9781 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -195,9 +195,6 @@ struct klp_patch {

int klp_enable_patch(struct klp_patch *);

-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
-
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 86a45d8a593b..16632e75112a 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -729,12 +729,6 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
func->old_sympos ? func->old_sympos : 1);
}

-/* Arches may override this to finish any remaining arch-specific tasks */
-void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj)
-{
-}
-
int klp_apply_object_relocs(struct klp_patch *patch, struct klp_object *obj)
{
int i, ret;
@@ -764,10 +758,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;

- mutex_lock(&text_mutex);
- module_disable_ro(patch->mod);
-
if (klp_is_module(obj)) {
+
+ mutex_lock(&text_mutex);
+ module_disable_ro(patch->mod);
+
/*
* Only write module-specific relocations here
* (.klp.rela.{module}.*). vmlinux-specific relocations were
@@ -775,15 +770,14 @@ static int klp_init_object_loaded(struct klp_patch *patch,
* itself.
*/
ret = klp_apply_object_relocs(patch, obj);
+
+ module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);
+
if (ret)
return ret;
}

- arch_klp_init_object_loaded(patch, obj);
-
- module_enable_ro(patch->mod, true);
- mutex_unlock(&text_mutex);
-
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
--
2.21.1

2020-04-25 11:12:57

by Josh Poimboeuf

Subject: [PATCH v3 09/10] module: Remove module_disable_ro()

module_disable_ro() has no more users. Remove it.

Signed-off-by: Josh Poimboeuf <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
---
include/linux/module.h | 2 --
kernel/module.c | 13 -------------
2 files changed, 15 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index 1ad393e62bef..e4ef7b36feda 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -860,10 +860,8 @@ extern int module_sysfs_initialized;

#ifdef CONFIG_STRICT_MODULE_RWX
extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
#else
static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
#endif

#ifdef CONFIG_GENERIC_BUG
diff --git a/kernel/module.c b/kernel/module.c
index 96b2575329f4..f0e414a01d91 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2016,19 +2016,6 @@ static void frob_writable_data(const struct module_layout *layout,
(layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}

-/* livepatching wants to disable read-only so it can frob module. */
-void module_disable_ro(const struct module *mod)
-{
- if (!rodata_enabled)
- return;
-
- frob_text(&mod->core_layout, set_memory_rw);
- frob_rodata(&mod->core_layout, set_memory_rw);
- frob_ro_after_init(&mod->core_layout, set_memory_rw);
- frob_text(&mod->init_layout, set_memory_rw);
- frob_rodata(&mod->init_layout, set_memory_rw);
-}
-
void module_enable_ro(const struct module *mod, bool after_init)
{
if (!rodata_enabled)
--
2.21.1

2020-04-25 11:13:58

by Josh Poimboeuf

Subject: [PATCH v3 10/10] x86/module: Use text_mutex in apply_relocate_add()

Now that the livepatch code no longer needs the text_mutex for changing
module permissions, move its usage down to apply_relocate_add().

Note the s390 version of apply_relocate_add() doesn't need to use the
text_mutex because it already uses s390_kernel_write_lock, which
accomplishes the same task.

Signed-off-by: Josh Poimboeuf <[email protected]>
---
arch/x86/kernel/module.c | 9 +++++++--
kernel/livepatch/core.c | 6 ------
2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 7614f478fd7a..23c95a53d20e 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -18,6 +18,7 @@
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
+#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
@@ -227,14 +228,18 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
bool early = me->state == MODULE_STATE_UNFORMED;
void *(*write)(void *, const void *, size_t) = memcpy;

- if (!early)
+ if (!early) {
write = text_poke;
+ mutex_lock(&text_mutex);
+ }

ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
write);

- if (!early)
+ if (!early) {
text_poke_sync();
+ mutex_unlock(&text_mutex);
+ }

return ret;
}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6b8b3c067be0..96d2da14eb0d 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -775,9 +775,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
int ret;

if (klp_is_module(obj)) {
-
- mutex_lock(&text_mutex);
-
/*
* Only write module-specific relocations here
* (.klp.rela.{module}.*). vmlinux-specific relocations were
@@ -785,9 +782,6 @@ static int klp_init_object_loaded(struct klp_patch *patch,
* itself.
*/
ret = klp_apply_object_relocs(patch, obj);
-
- mutex_unlock(&text_mutex);
-
if (ret)
return ret;
}
--
2.21.1

2020-04-25 11:17:15

by Josh Poimboeuf

Subject: Re: [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations

On Sat, Apr 25, 2020 at 06:07:26AM -0500, Josh Poimboeuf wrote:
> From: Peter Zijlstra <[email protected]>
>
> Because of late module patching, a livepatch module needs to be able to
> apply some of its relocations well after it has been loaded. Instead of
> playing games with module_{dis,en}able_ro(), use existing text poking
> mechanisms to apply relocations after module loading.
>
> So far only x86, s390 and Power have HAVE_LIVEPATCH but only the first
> two also have STRICT_MODULE_RWX.
>
> This will allow removal of the last module_disable_ro() usage in
> livepatch. The ultimate goal is to completely disallow making
> executable mappings writable.
>
> [ jpoimboe: Split up patches. Use mod state to determine whether
> memcpy() can be used. Test and add fixes. ]
>
> Cc: [email protected]
> Cc: Heiko Carstens [email protected]

Gah, I somehow messed up the formatting on this Cc, should be

Cc: Heiko Carstens <[email protected]>

> Cc: Gerald Schaefer <[email protected]>
> Cc: Christian Borntraeger <[email protected]>
> Suggested-by: Josh Poimboeuf <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> Signed-off-by: Josh Poimboeuf <[email protected]>
> Acked-by: Peter Zijlstra (Intel) <[email protected]>
> ---
> [ ... diff snipped; identical to the patch above ... ]

--
Josh

2020-04-27 12:26:21

by Joe Lawrence

Subject: Re: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

On 4/25/20 7:07 AM, Josh Poimboeuf wrote:
> [ ... ]
> This has been tested with kpatch-build integration tests and klp-convert
> selftests.
>

Hi Josh,

I've added some late module patching tests for klp-convert as well as
extended the existing ones. I'll put them on-top of v3 and give it some
test runs today (x86, ppc64le, s390x) and report back.

BTW, this may be out of scope for this patchset, but is it a large
amount of work to support clearing klp-relocations on target module
unload? ie, this test case:

- (target module and livepatch loaded)
- rmmod target_mod
- modprobe target_mod << fails as reloc target is non-zero

IIRC, Miroslav had taken a stab at this last year, but I don't remember
what the technical problems were then.

-- Joe

2020-04-27 12:34:26

by Miroslav Benes

Subject: Re: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

> BTW, this may be out of scope for this patchset, but is it a large amount of
> work to support clearing klp-relocations on target module unload? ie, this
> test case:
>
> - (target module and livepatch loaded)
> - rmmod target_mod
> - modprobe target_mod << fails as reloc target is non-zero
>
> IIRC, Miroslav had taken a stab at this last year, but I don't remember what
> the technical problems were then.

Yes, see
https://lore.kernel.org/live-patching/[email protected]/

I think the "conclusion" there still applies... but yes, it makes testing
difficult.

Miroslav

2020-04-27 16:56:33

by Joe Lawrence

Subject: Re: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

On Mon, Apr 27, 2020 at 08:22:03AM -0400, Joe Lawrence wrote:
> On 4/25/20 7:07 AM, Josh Poimboeuf wrote:
> > [ ... ]
> > Highlights:
> >
> > - Remove the livepatch arch-specific .klp.arch sections, which were used
> > to do paravirt patching and alternatives patching for livepatch
> > replacement code.
> >
> > - Add support for jump labels in patched code.

Nit: support for the hopefully-common cases, anyway: jump labels whose
keys won't require late klp-relocations (keys defined in a
not-yet-loaded module remain unsupported).

> >
> > [ ... ]
> >
>
> Hi Josh,
>
> I've added some late module patching tests for klp-convert as well as
> extended the existing ones. I'll put them on-top of v3 and give it some
> test runs today (x86, ppc64le, s390x) and report back.

Ok all three arches look good with the klp-convert WIP tests. For
reference, I pushed the combined branch up to github [1] if anyone else
wants to see the klp-relocations in action first hand.

For the series:
Acked-by: Joe Lawrence <[email protected]>


[1] https://github.com/joe-lawrence/linux/tree/jp-v3-klp-convert

-- Joe

2020-04-28 09:22:12

by Miroslav Benes

Subject: Re: [PATCH v3 02/10] livepatch: Apply vmlinux-specific KLP relocations early

> @@ -738,18 +765,23 @@ static int klp_init_object_loaded(struct klp_patch *patch,
> int ret;
>
> mutex_lock(&text_mutex);
> -
> module_disable_ro(patch->mod);
> - ret = klp_write_object_relocations(patch->mod, obj);
> - if (ret) {
> - module_enable_ro(patch->mod, true);
> - mutex_unlock(&text_mutex);
> - return ret;
> +
> + if (klp_is_module(obj)) {
> + /*
> + * Only write module-specific relocations here
> + * (.klp.rela.{module}.*). vmlinux-specific relocations were
> + * written earlier during the initialization of the klp module
> + * itself.
> + */
> + ret = klp_apply_object_relocs(patch, obj);
> + if (ret)

+ module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);

is missing here, I think. Probably lost during rebase. It is fine after
the next patch.

> + return ret;
> }
>
> arch_klp_init_object_loaded(patch, obj);
> - module_enable_ro(patch->mod, true);
>
> + module_enable_ro(patch->mod, true);
> mutex_unlock(&text_mutex);

Miroslav

2020-04-28 13:51:01

by Miroslav Benes

Subject: Re: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

On Sat, 25 Apr 2020, Josh Poimboeuf wrote:

> [ ... cover letter snipped ... ]

With the small issue in patch 2 fixed

Acked-by: Miroslav Benes <[email protected]>

Great stuff. I am happy we will get rid of the arch-specific code.

M

2020-04-28 14:39:34

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [PATCH v3 00/10] livepatch,module: Remove .klp.arch and module_disable_ro()

On Tue, Apr 28, 2020 at 03:48:58PM +0200, Miroslav Benes wrote:
>
> With the small issue in patch 2 fixed
>
> Acked-by: Miroslav Benes <[email protected]>
>
> Great stuff. I am happy we will get rid of the arch-specific code.

Thanks! Good catch with the bad rebase. I'll fix it up with another
revision.

--
Josh

2020-04-28 16:27:36

by Jessica Yu

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] module: Remove module_disable_ro()

+++ Josh Poimboeuf [25/04/20 06:07 -0500]:
>module_disable_ro() has no more users. Remove it.
>
>Signed-off-by: Josh Poimboeuf <[email protected]>
>Acked-by: Peter Zijlstra (Intel) <[email protected]>

Hm, I guess this means we can also remove the module_enable_ro() stubs
in module.h and make it a static function again (like the other
module_enable_* functions) as there are no more outside users. I'll
have to remind myself after this patchset is merged :-)

Jessica

2020-04-28 16:40:13

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] module: Remove module_disable_ro()

On Tue, Apr 28, 2020 at 06:25:05PM +0200, Jessica Yu wrote:
> +++ Josh Poimboeuf [25/04/20 06:07 -0500]:
> > module_disable_ro() has no more users. Remove it.
> >
> > Signed-off-by: Josh Poimboeuf <[email protected]>
> > Acked-by: Peter Zijlstra (Intel) <[email protected]>
>
> Hm, I guess this means we can also remove the module_enable_ro() stubs
> in module.h and make it a static function again (like the other
> module_enable_* functions) as there are no more outside users. I'll
> have to remind myself after this patchset is merged :-)

Ah, true. I'm respinning the patch set anyway; I can just add this as
another patch.

--
Josh

2020-04-28 16:43:49

by Jessica Yu

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] module: Remove module_disable_ro()

+++ Josh Poimboeuf [28/04/20 11:36 -0500]:
>On Tue, Apr 28, 2020 at 06:25:05PM +0200, Jessica Yu wrote:
>> +++ Josh Poimboeuf [25/04/20 06:07 -0500]:
>> > module_disable_ro() has no more users. Remove it.
>> >
>> > Signed-off-by: Josh Poimboeuf <[email protected]>
>> > Acked-by: Peter Zijlstra (Intel) <[email protected]>
>>
>> Hm, I guess this means we can also remove the module_enable_ro() stubs
>> in module.h and make it a static function again (like the other
>> module_enable_* functions) as there are no more outside users. I'll
>> have to remind myself after this patchset is merged :-)
>
>Ah, true. I'm respinning the patch set anyway; I can just add this as
>another patch.

That would be great. Thanks!

2020-04-28 17:07:59

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] module: Remove module_disable_ro()

On Tue, Apr 28, 2020 at 06:41:55PM +0200, Jessica Yu wrote:
> +++ Josh Poimboeuf [28/04/20 11:36 -0500]:
> > On Tue, Apr 28, 2020 at 06:25:05PM +0200, Jessica Yu wrote:
> > > +++ Josh Poimboeuf [25/04/20 06:07 -0500]:
> > > > module_disable_ro() has no more users. Remove it.
> > > >
> > > > Signed-off-by: Josh Poimboeuf <[email protected]>
> > > > Acked-by: Peter Zijlstra (Intel) <[email protected]>
> > >
> > > Hm, I guess this means we can also remove the module_enable_ro() stubs
> > > in module.h and make it a static function again (like the other
> > > module_enable_* functions) as there are no more outside users. I'll
> > > have to remind myself after this patchset is merged :-)
> >
> > Ah, true. I'm respinning the patch set anyway, I can just add this as a
> > another patch.
>
> That would be great. Thanks!

Sneak preview:

From: Josh Poimboeuf <[email protected]>
Subject: [PATCH] module: Make module_enable_ro() static again

Now that module_enable_ro() has no more external users, make it static
again.

Suggested-by: Jessica Yu <[email protected]>
Signed-off-by: Josh Poimboeuf <[email protected]>
---
include/linux/module.h | 6 ------
kernel/module.c | 4 ++--
2 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index e4ef7b36feda..2c2e988bcf10 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -858,12 +858,6 @@ extern int module_sysfs_initialized;

#define __MODULE_STRING(x) __stringify(x)

-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void module_enable_ro(const struct module *mod, bool after_init);
-#else
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-#endif
-
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
diff --git a/kernel/module.c b/kernel/module.c
index f0e414a01d91..6d8aab60943e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2016,7 +2016,7 @@ static void frob_writable_data(const struct module_layout *layout,
(layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}

-void module_enable_ro(const struct module *mod, bool after_init)
+static void module_enable_ro(const struct module *mod, bool after_init)
{
if (!rodata_enabled)
return;
@@ -2057,7 +2057,7 @@ static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
}

#else /* !CONFIG_STRICT_MODULE_RWX */
-/* module_{enable,disable}_ro() stubs are in module.h */
+void module_enable_ro(const struct module *mod, bool after_init) {}
static void module_enable_nx(const struct module *mod) { }
static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
--
2.21.1

2020-04-28 18:58:29

by Jessica Yu

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] module: Remove module_disable_ro()

+++ Josh Poimboeuf [28/04/20 12:03 -0500]:
>On Tue, Apr 28, 2020 at 06:41:55PM +0200, Jessica Yu wrote:
>> +++ Josh Poimboeuf [28/04/20 11:36 -0500]:
>> > On Tue, Apr 28, 2020 at 06:25:05PM +0200, Jessica Yu wrote:
>> > > +++ Josh Poimboeuf [25/04/20 06:07 -0500]:
>> > > > module_disable_ro() has no more users. Remove it.
>> > > >
>> > > > Signed-off-by: Josh Poimboeuf <[email protected]>
>> > > > Acked-by: Peter Zijlstra (Intel) <[email protected]>
>> > >
>> > > Hm, I guess this means we can also remove the module_enable_ro() stubs
>> > > in module.h and make it a static function again (like the other
>> > > module_enable_* functions) as there are no more outside users. I'll
>> > > have to remind myself after this patchset is merged :-)
>> >
>> > Ah, true. I'm respinning the patch set anyway; I can just add this as
>> > another patch.
>>
>> That would be great. Thanks!
>
>Sneak preview:
>
>From: Josh Poimboeuf <[email protected]>
>Subject: [PATCH] module: Make module_enable_ro() static again
>
>Now that module_enable_ro() has no more external users, make it static
>again.
>
>Suggested-by: Jessica Yu <[email protected]>
>Signed-off-by: Josh Poimboeuf <[email protected]>
>---
> include/linux/module.h | 6 ------
> kernel/module.c | 4 ++--
> 2 files changed, 2 insertions(+), 8 deletions(-)
>
>diff --git a/include/linux/module.h b/include/linux/module.h
>index e4ef7b36feda..2c2e988bcf10 100644
>--- a/include/linux/module.h
>+++ b/include/linux/module.h
>@@ -858,12 +858,6 @@ extern int module_sysfs_initialized;
>
> #define __MODULE_STRING(x) __stringify(x)
>
>-#ifdef CONFIG_STRICT_MODULE_RWX
>-extern void module_enable_ro(const struct module *mod, bool after_init);
>-#else
>-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
>-#endif
>-
> #ifdef CONFIG_GENERIC_BUG
> void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
> struct module *);
>diff --git a/kernel/module.c b/kernel/module.c
>index f0e414a01d91..6d8aab60943e 100644
>--- a/kernel/module.c
>+++ b/kernel/module.c
>@@ -2016,7 +2016,7 @@ static void frob_writable_data(const struct module_layout *layout,
> (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
> }
>
>-void module_enable_ro(const struct module *mod, bool after_init)
>+static void module_enable_ro(const struct module *mod, bool after_init)
> {
> if (!rodata_enabled)
> return;
>@@ -2057,7 +2057,7 @@ static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
> }
>
> #else /* !CONFIG_STRICT_MODULE_RWX */
>-/* module_{enable,disable}_ro() stubs are in module.h */
>+void module_enable_ro(const struct module *mod, bool after_init) {}

Missing static here, but otherwise looks good. Thanks!
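
(i.e., presumably:

	static void module_enable_ro(const struct module *mod, bool after_init) { }

to match the other stubs in the !CONFIG_STRICT_MODULE_RWX block.)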

> static void module_enable_nx(const struct module *mod) { }
> static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
> char *secstrings, struct module *mod)
>--
>2.21.1
>