Hi,
It seems the .static_call_sites section was not being marked as
ro-after-init in modules. Adjust the macro names, add comments, refactor
the module section list, and fix .static_call_sites.
Thanks!
-Kees
Kees Cook (4):
vmlinux.lds.h: Use regular *RODATA and *RO_AFTER_INIT_DATA suffixes
vmlinux.lds.h: Split .static_call_sites from .static_call_tramp_key
module: Use a list of strings for ro_after_init sections
module: Include .static_call_sites in module ro_after_init
arch/s390/kernel/vmlinux.lds.S | 2 +-
include/asm-generic/vmlinux.lds.h | 22 +++++++++++++++-------
kernel/module.c | 29 +++++++++++++++++------------
3 files changed, 33 insertions(+), 20 deletions(-)
--
2.30.2
Rename the various section macros that live in RODATA and
RO_AFTER_INIT_DATA. Just being called "DATA" implies they are expected
to be writable.
Cc: Arnd Bergmann <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
arch/s390/kernel/vmlinux.lds.S | 2 +-
include/asm-generic/vmlinux.lds.h | 12 ++++++------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 63bdb9e1bfc1..93bc74c2a71b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -64,7 +64,7 @@ SECTIONS
__start_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
- JUMP_TABLE_DATA
+ JUMP_TABLE_RO_AFTER_INIT_DATA
} :data
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 62669b36a772..70c74fdf9c9b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -128,7 +128,7 @@
* used to determine the order of the priority of each sched class in
* relation to each other.
*/
-#define SCHED_DATA \
+#define SCHED_RODATA \
STRUCT_ALIGN(); \
__begin_sched_classes = .; \
*(__idle_sched_class) \
@@ -396,13 +396,13 @@
. = __start_init_task + THREAD_SIZE; \
__end_init_task = .;
-#define JUMP_TABLE_DATA \
+#define JUMP_TABLE_RO_AFTER_INIT_DATA \
. = ALIGN(8); \
__start___jump_table = .; \
KEEP(*(__jump_table)) \
__stop___jump_table = .;
-#define STATIC_CALL_DATA \
+#define STATIC_CALL_RO_AFTER_INIT_DATA \
. = ALIGN(8); \
__start_static_call_sites = .; \
KEEP(*(.static_call_sites)) \
@@ -420,8 +420,8 @@
. = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
- JUMP_TABLE_DATA \
- STATIC_CALL_DATA \
+ JUMP_TABLE_RO_AFTER_INIT_DATA \
+ STATIC_CALL_RO_AFTER_INIT_DATA \
__end_ro_after_init = .;
#endif
@@ -433,7 +433,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
- SCHED_DATA \
+ SCHED_RODATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
--
2.30.2
Instead of open-coding the section names, use a list for the sections that
need to be marked read-only after init. Unfortunately, it seems we can't
do normal section merging with scripts/module.lds.S as ld.bfd doesn't
correctly update symbol tables. For more details, see commit 6a3193cdd5e5
("kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG
is enabled").
Cc: Arnd Bergmann <[email protected]>
Cc: Jessica Yu <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/asm-generic/vmlinux.lds.h | 4 +++-
kernel/module.c | 28 ++++++++++++++++------------
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 4781a8154254..d532baadaeae 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -418,7 +418,9 @@
/*
* Allow architectures to handle ro_after_init data on their
- * own by defining an empty RO_AFTER_INIT_DATA.
+ * own by defining an empty RO_AFTER_INIT_DATA. Any sections
+ * added here must be explicitly marked SHF_RO_AFTER_INIT
+ * via module_sections_ro_after_init[] in kernel/module.c.
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
diff --git a/kernel/module.c b/kernel/module.c
index ed13917ea5f3..b0ff82cc48fe 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3514,10 +3514,21 @@ static bool blacklisted(const char *module_name)
}
core_param(module_blacklist, module_blacklist, charp, 0400);
+/*
+ * List of sections to be marked read-only after init. This should match
+ * the RO_AFTER_INIT_DATA macro in include/asm-generic/vmlinux.lds.h.
+ */
+static const char * const module_sections_ro_after_init[] = {
+ ".data..ro_after_init",
+ "__jump_table",
+ NULL
+};
+
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
struct module *mod;
unsigned int ndx;
+ const char * const *section;
int err;
err = check_modinfo(info->mod, info, flags);
@@ -3543,18 +3554,11 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
* layout_sections() can put it in the right place.
* Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
*/
- ndx = find_sec(info, ".data..ro_after_init");
- if (ndx)
- info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
- /*
- * Mark the __jump_table section as ro_after_init as well: these data
- * structures are never modified, with the exception of entries that
- * refer to code in the __init section, which are annotated as such
- * at module load time.
- */
- ndx = find_sec(info, "__jump_table");
- if (ndx)
- info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+ for (section = module_sections_ro_after_init; *section; section++) {
+ ndx = find_sec(info, *section);
+ if (ndx)
+ info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+ }
/*
* Determine total sizes, and put offsets in sh_entsize. For now
--
2.30.2
The RO_AFTER_INIT_DATA macro and module_sections_ro_after_init[] need
to be kept in sync.
Cc: Jessica Yu <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Fixes: 9183c3f9ed71 ("static_call: Add inline static call infrastructure")
Signed-off-by: Kees Cook <[email protected]>
---
kernel/module.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/kernel/module.c b/kernel/module.c
index b0ff82cc48fe..06410eb68dea 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3521,6 +3521,7 @@ core_param(module_blacklist, module_blacklist, charp, 0400);
static const char * const module_sections_ro_after_init[] = {
".data..ro_after_init",
"__jump_table",
+ ".static_call_sites",
NULL
};
--
2.30.2
On Wed, Sep 01, 2021 at 04:37:54PM -0700, Kees Cook wrote:
> Rename the various section macros that live in RODATA and
> RO_AFTER_INIT_DATA. Just being called "DATA" implies they are expected
> to be writable.
>
> Cc: Arnd Bergmann <[email protected]>
> Cc: Josh Poimboeuf <[email protected]>
> Cc: Peter Zijlstra (Intel) <[email protected]>
> Cc: [email protected]
> Signed-off-by: Kees Cook <[email protected]>
Acked-by: Josh Poimboeuf <[email protected]>
--
Josh
On Wed, Sep 01, 2021 at 04:37:56PM -0700, Kees Cook wrote:
> Instead of open-coding the section names, use a list for the sections that
> need to be marked read-only after init. Unfortunately, it seems we can't
> do normal section merging with scripts/module.lds.S as ld.bfd doesn't
> correctly update symbol tables. For more details, see commit 6a3193cdd5e5
> ("kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG
> is enabled").
I'm missing what this has to do with section merging. Can you connect
the dots here, i.e. what sections would we want to merge and how would
that help here?
Instead of hard-coding section names in module.c, I'm wondering if we
can do something like the following to set SHF_RO_AFTER_INIT when first
creating the sections. Completely untested...
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 0449b125d27f..d4ff34c6199c 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#define JUMP_TABLE_ENTRY \
- ".pushsection __jump_table, \"aw\" \n\t" \
+ ".pushsection __jump_table, \"0x00200003\" \n\t"\
_ASM_ALIGN "\n\t" \
".long 1b - . \n\t" \
".long %l[l_yes] - . \n\t" \
diff --git a/kernel/module.c b/kernel/module.c
index 40ec9a030eec..1dda33c9ae49 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3549,15 +3549,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
* Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
*/
ndx = find_sec(info, ".data..ro_after_init");
- if (ndx)
- info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
- /*
- * Mark the __jump_table section as ro_after_init as well: these data
- * structures are never modified, with the exception of entries that
- * refer to code in the __init section, which are annotated as such
- * at module load time.
- */
- ndx = find_sec(info, "__jump_table");
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e5947fbb9e7a..b25ca38179ea 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -20,6 +20,9 @@
#include <linux/kernel.h>
#include <linux/static_call_types.h>
+/* cribbed from include/uapi/linux/elf.h */
+#define SHF_RO_AFTER_INIT 0x00200000
+
struct alternative {
struct list_head list;
struct instruction *insn;
@@ -466,7 +469,8 @@ static int create_static_call_sections(struct objtool_file *file)
list_for_each_entry(insn, &file->static_call_list, call_node)
idx++;
- sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
+ sec = elf_create_section(file->elf, ".static_call_sites",
+ SHF_WRITE | SHF_RO_AFTER_INIT,
sizeof(struct static_call_site), idx);
if (!sec)
return -1;
On Wed, Sep 01, 2021 at 04:37:54PM -0700, Kees Cook wrote:
> Rename the various section macros that live in RODATA and
> RO_AFTER_INIT_DATA. Just being called "DATA" implies they are expected
> to be writable.
>
> Cc: Arnd Bergmann <[email protected]>
> Cc: Josh Poimboeuf <[email protected]>
> Cc: Peter Zijlstra (Intel) <[email protected]>
> Cc: [email protected]
> Signed-off-by: Kees Cook <[email protected]>
> ---
> arch/s390/kernel/vmlinux.lds.S | 2 +-
> include/asm-generic/vmlinux.lds.h | 12 ++++++------
> 2 files changed, 7 insertions(+), 7 deletions(-)
For the s390 bit:
Acked-by: Heiko Carstens <[email protected]>
On Thu, Sep 02, 2021 at 11:49:51PM -0700, Josh Poimboeuf wrote:
> On Wed, Sep 01, 2021 at 04:37:56PM -0700, Kees Cook wrote:
> > Instead of open-coding the section names, use a list for the sections that
> > need to be marked read-only after init. Unfortunately, it seems we can't
> > do normal section merging with scripts/module.lds.S as ld.bfd doesn't
> > correctly update symbol tables. For more details, see commit 6a3193cdd5e5
> > ("kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG
> > is enabled").
>
> I'm missing what this has to do with section merging. Can you connect
> the dots here, i.e. what sections would we want to merge and how would
> that help here?
Right, sorry, if ld.bfd didn't have this issue, we could use section
merging in the module.lds.S file the way we do in vmlinux.lds:
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
. = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
...
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
__stop___tracepoints_ptrs = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
Then jump_table and static_call sections could be collected into a
new section, as the module loader would only need to look for that
single name.
> Instead of hard-coding section names in module.c, I'm wondering if we
> can do something like the following to set SHF_RO_AFTER_INIT when first
> creating the sections. Completely untested...
>
>
> diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
> index 0449b125d27f..d4ff34c6199c 100644
> --- a/arch/x86/include/asm/jump_label.h
> +++ b/arch/x86/include/asm/jump_label.h
> @@ -13,7 +13,7 @@
> #include <linux/types.h>
>
> #define JUMP_TABLE_ENTRY \
> - ".pushsection __jump_table, \"aw\" \n\t" \
> + ".pushsection __jump_table, \"0x00200003\" \n\t"\
> _ASM_ALIGN "\n\t" \
> ".long 1b - . \n\t" \
> ".long %l[l_yes] - . \n\t" \
> diff --git a/kernel/module.c b/kernel/module.c
> index 40ec9a030eec..1dda33c9ae49 100644
> --- a/kernel/module.c
> +++ b/kernel/module.c
> @@ -3549,15 +3549,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
> * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
> */
> ndx = find_sec(info, ".data..ro_after_init");
> - if (ndx)
> - info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
> - /*
> - * Mark the __jump_table section as ro_after_init as well: these data
> - * structures are never modified, with the exception of entries that
> - * refer to code in the __init section, which are annotated as such
> - * at module load time.
> - */
> - ndx = find_sec(info, "__jump_table");
> if (ndx)
> info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
>
> diff --git a/tools/objtool/check.c b/tools/objtool/check.c
> index e5947fbb9e7a..b25ca38179ea 100644
> --- a/tools/objtool/check.c
> +++ b/tools/objtool/check.c
> @@ -20,6 +20,9 @@
> #include <linux/kernel.h>
> #include <linux/static_call_types.h>
>
> +/* cribbed from include/uapi/linux/elf.h */
> +#define SHF_RO_AFTER_INIT 0x00200000
> +
> struct alternative {
> struct list_head list;
> struct instruction *insn;
> @@ -466,7 +469,8 @@ static int create_static_call_sections(struct objtool_file *file)
> list_for_each_entry(insn, &file->static_call_list, call_node)
> idx++;
>
> - sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
> + sec = elf_create_section(file->elf, ".static_call_sites",
> + SHF_WRITE | SHF_RO_AFTER_INIT,
> sizeof(struct static_call_site), idx);
> if (!sec)
> return -1;
Interesting! I got the impression from the module code that this wasn't
possible since it'd be exposing an internal set of flags to the external
linker, and would break the vmlinux section merging (since it _is_
supposed to live in the .rodata section ultimately). The modules handle
permissions slightly differently (i.e., with more temporal precision) than
the kernel does, though. (Most of the architecture's vmlinux logic starts with
everything writable, and only does the read-only-ness after __init,
though I think s390 does it "correctly" and as such has a separate area
for the ro-after-init section.)
--
Kees Cook
On Fri, Sep 03, 2021 at 09:38:42AM -0700, Kees Cook wrote:
> On Thu, Sep 02, 2021 at 11:49:51PM -0700, Josh Poimboeuf wrote:
> > On Wed, Sep 01, 2021 at 04:37:56PM -0700, Kees Cook wrote:
> > > Instead of open-coding the section names, use a list for the sections that
> > > need to be marked read-only after init. Unfortunately, it seems we can't
> > > do normal section merging with scripts/module.lds.S as ld.bfd doesn't
> > > correctly update symbol tables. For more details, see commit 6a3193cdd5e5
> > > ("kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG
> > > is enabled").
> >
> > I'm missing what this has to do with section merging. Can you connect
> > the dots here, i.e. what sections would we want to merge and how would
> > that help here?
>
> Right, sorry, if ld.bfd didn't have this issue, we could use section
> merging in the module.lds.S file the way we do in vmlinux.lds:
>
> #ifndef RO_AFTER_INIT_DATA
> #define RO_AFTER_INIT_DATA \
> . = ALIGN(8); \
> __start_ro_after_init = .; \
> *(.data..ro_after_init) \
> JUMP_TABLE_DATA \
> STATIC_CALL_DATA \
> __end_ro_after_init = .;
> #endif
> ...
> . = ALIGN((align)); \
> .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
> __start_rodata = .; \
> *(.rodata) *(.rodata.*) \
> SCHED_DATA \
> RO_AFTER_INIT_DATA /* Read only after init */ \
> . = ALIGN(8); \
> __start___tracepoints_ptrs = .; \
> KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
> __stop___tracepoints_ptrs = .; \
> *(__tracepoints_strings)/* Tracepoints: strings */ \
> } \
>
> Then jump_table and static_call sections could be collected into a
> new section, as the module loader would only need to look for that
> single name.
Hm, that could be a really nice way to converge things for vmlinux and
module linking.
After some digging, 6a3193cdd5e5 isn't necessarily a linker bug. It may
be some kind of undefined behavior when the section address isn't
specified. If you just explicitly set the section address to zero then
the "bug" goes away.
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index 04c5685c25cf..80b09b7d405c 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -30,23 +30,22 @@ SECTIONS {
__patchable_function_entries : { *(__patchable_function_entries) }
-#ifdef CONFIG_LTO_CLANG
/*
* With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
* -ffunction-sections, which increases the size of the final module.
* Merge the split sections in the final binary.
*/
- .bss : {
+ .bss 0 : {
*(.bss .bss.[0-9a-zA-Z_]*)
*(.bss..L*)
}
- .data : {
+ .data 0 : {
*(.data .data.[0-9a-zA-Z_]*)
*(.data..L*)
}
- .rodata : {
+ .rodata 0 : {
*(.rodata .rodata.[0-9a-zA-Z_]*)
*(.rodata..L*)
}
@@ -55,11 +54,10 @@ SECTIONS {
* With CONFIG_CFI_CLANG, we assume __cfi_check is at the beginning
* of the .text section, and is aligned to PAGE_SIZE.
*/
- .text : ALIGN_CFI {
+ .text 0 : ALIGN_CFI {
*(.text.__cfi_check)
*(.text .text.[0-9a-zA-Z_]* .text..L.cfi*)
}
-#endif
}
/* bring in arch-specific sections */
On Fri, Sep 03, 2021 at 09:09:03PM -0700, Josh Poimboeuf wrote:
> On Fri, Sep 03, 2021 at 09:38:42AM -0700, Kees Cook wrote:
> > On Thu, Sep 02, 2021 at 11:49:51PM -0700, Josh Poimboeuf wrote:
> > > On Wed, Sep 01, 2021 at 04:37:56PM -0700, Kees Cook wrote:
> > > > Instead of open-coding the section names, use a list for the sections that
> > > > need to be marked read-only after init. Unfortunately, it seems we can't
> > > > do normal section merging with scripts/module.lds.S as ld.bfd doesn't
> > > > correctly update symbol tables. For more details, see commit 6a3193cdd5e5
> > > > ("kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG
> > > > is enabled").
> > >
> > > I'm missing what this has to do with section merging. Can you connect
> > > the dots here, i.e. what sections would we want to merge and how would
> > > that help here?
> >
> > Right, sorry, if ld.bfd didn't have this issue, we could use section
> > merging in the module.lds.S file the way we do in vmlinux.lds:
> >
> > #ifndef RO_AFTER_INIT_DATA
> > #define RO_AFTER_INIT_DATA \
> > . = ALIGN(8); \
> > __start_ro_after_init = .; \
> > *(.data..ro_after_init) \
> > JUMP_TABLE_DATA \
> > STATIC_CALL_DATA \
> > __end_ro_after_init = .;
> > #endif
> > ...
> > . = ALIGN((align)); \
> > .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
> > __start_rodata = .; \
> > *(.rodata) *(.rodata.*) \
> > SCHED_DATA \
> > RO_AFTER_INIT_DATA /* Read only after init */ \
> > . = ALIGN(8); \
> > __start___tracepoints_ptrs = .; \
> > KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
> > __stop___tracepoints_ptrs = .; \
> > *(__tracepoints_strings)/* Tracepoints: strings */ \
> > } \
> >
> > Then jump_table and static_call sections could be collected into a
> > new section, as the module loader would only need to look for that
> > single name.
>
> Hm, that could be a really nice way to converge things for vmlinux and
> module linking.
Agreed! I had really wanted to do more of this, but was stumped by the
weird symbol behavior.
> After some digging, 6a3193cdd5e5 isn't necessarily a linker bug. It may
> be some kind of undefined behavior when the section address isn't
> specified. If you just explicitly set the section address to zero then
> the "bug" goes away.
Well that's a nice find! I'll play more with this to see if I can make a
cleaner solution.
Thanks!
-Kees
>
> diff --git a/scripts/module.lds.S b/scripts/module.lds.S
> index 04c5685c25cf..80b09b7d405c 100644
> --- a/scripts/module.lds.S
> +++ b/scripts/module.lds.S
> @@ -30,23 +30,22 @@ SECTIONS {
>
> __patchable_function_entries : { *(__patchable_function_entries) }
>
> -#ifdef CONFIG_LTO_CLANG
> /*
> * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
> * -ffunction-sections, which increases the size of the final module.
> * Merge the split sections in the final binary.
> */
> - .bss : {
> + .bss 0 : {
> *(.bss .bss.[0-9a-zA-Z_]*)
> *(.bss..L*)
> }
>
> - .data : {
> + .data 0 : {
> *(.data .data.[0-9a-zA-Z_]*)
> *(.data..L*)
> }
>
> - .rodata : {
> + .rodata 0 : {
> *(.rodata .rodata.[0-9a-zA-Z_]*)
> *(.rodata..L*)
> }
> @@ -55,11 +54,10 @@ SECTIONS {
> * With CONFIG_CFI_CLANG, we assume __cfi_check is at the beginning
> * of the .text section, and is aligned to PAGE_SIZE.
> */
> - .text : ALIGN_CFI {
> + .text 0 : ALIGN_CFI {
> *(.text.__cfi_check)
> *(.text .text.[0-9a-zA-Z_]* .text..L.cfi*)
> }
> -#endif
> }
>
> /* bring in arch-specific sections */
>
--
Kees Cook