As is done for other architectures, sort the exception table at
build-time rather than during boot.

Since sortextable appears to be a standalone C program relying on the
host elf.h to provide EM_AARCH64, I've had to add a conditional check in
order to allow cross-compilation on machines that aren't running a
bleeding-edge libc-dev.

Signed-off-by: Will Deacon <[email protected]>
---
 arch/arm64/Kconfig              |  1 +
 arch/arm64/kernel/vmlinux.lds.S | 15 +++++++--------
 scripts/sortextable.c           |  5 +++++
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 48347dc..fb8fd90 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -7,6 +7,7 @@ config ARM64
select ARM_AMBA
select ARM_ARCH_TIMER
select ARM_GIC
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3fae2be..3d527726 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -57,6 +57,13 @@ SECTIONS

RO_DATA(PAGE_SIZE)

+ . = ALIGN(8);
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
_etext = .; /* End of text and rodata section */

. = ALIGN(PAGE_SIZE);
@@ -99,14 +106,6 @@ SECTIONS
READ_MOSTLY_DATA(64)

/*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(32);
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
-
- /*
* and the usual data section
*/
DATA_DATA
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 1f10e89..f9ce116 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

+#ifndef EM_AARCH64
+#define EM_AARCH64 183
+#endif
+
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
@@ -249,6 +253,7 @@ do_file(char const *const fname)
custom_sort = sort_relative_table;
break;
case EM_ARM:
+ case EM_AARCH64:
case EM_MIPS:
break;
} /* end switch */
--
1.8.2.2

On 05/17/2013 09:43 AM, Will Deacon wrote:
> As is done for other architectures, sort the exception table at
> build-time rather than during boot.
>
> Since sortextable appears to be a standalone C program relying on the
> host elf.h to provide EM_AARCH64, I've had to add a conditional check in
> order to allow cross-compilation on machines that aren't running a
> bleeding-edge libc-dev.
>
> Signed-off-by: Will Deacon <[email protected]>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/vmlinux.lds.S | 15 +++++++--------
> scripts/sortextable.c | 5 +++++
> 3 files changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 48347dc..fb8fd90 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -7,6 +7,7 @@ config ARM64
> select ARM_AMBA
> select ARM_ARCH_TIMER
> select ARM_GIC
> + select BUILDTIME_EXTABLE_SORT
> select CLONE_BACKWARDS
> select COMMON_CLK
> select GENERIC_CLOCKEVENTS
> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index 3fae2be..3d527726 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -57,6 +57,13 @@ SECTIONS
>
> RO_DATA(PAGE_SIZE)
>
> + . = ALIGN(8);
> + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
> + __start___ex_table = .;
> + *(__ex_table)
> + __stop___ex_table = .;
> + }
> +
> _etext = .; /* End of text and rodata section */
You have moved this to rodata.
o Will the runtime sort still work like this?
o Do we care? Maybe not.
David Daney
>
> . = ALIGN(PAGE_SIZE);
> @@ -99,14 +106,6 @@ SECTIONS
> READ_MOSTLY_DATA(64)
>
> /*
> - * The exception fixup table (might need resorting at runtime)
> - */
> - . = ALIGN(32);
> - __start___ex_table = .;
> - *(__ex_table)
> - __stop___ex_table = .;
> -
> - /*
> * and the usual data section
> */
> DATA_DATA
> diff --git a/scripts/sortextable.c b/scripts/sortextable.c
> index 1f10e89..f9ce116 100644
> --- a/scripts/sortextable.c
> +++ b/scripts/sortextable.c
> @@ -31,6 +31,10 @@
> #include <tools/be_byteshift.h>
> #include <tools/le_byteshift.h>
>
> +#ifndef EM_AARCH64
> +#define EM_AARCH64 183
> +#endif
> +
> static int fd_map; /* File descriptor for file being modified. */
> static int mmap_failed; /* Boolean flag. */
> static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
> @@ -249,6 +253,7 @@ do_file(char const *const fname)
> custom_sort = sort_relative_table;
> break;
> case EM_ARM:
> + case EM_AARCH64:
> case EM_MIPS:
> break;
> } /* end switch */
>

On Fri, May 17, 2013 at 05:43:41PM +0100, Will Deacon wrote:
> As is done for other architectures, sort the exception table at
> build-time rather than during boot.
>
> Since sortextable appears to be a standalone C program relying on the
> host elf.h to provide EM_AARCH64, I've had to add a conditional check in
> order to allow cross-compilation on machines that aren't running a
> bleeding-edge libc-dev.
>
> Signed-off-by: Will Deacon <[email protected]>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/vmlinux.lds.S | 15 +++++++--------
> scripts/sortextable.c | 5 +++++
> 3 files changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 48347dc..fb8fd90 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -7,6 +7,7 @@ config ARM64
> select ARM_AMBA
> select ARM_ARCH_TIMER
> select ARM_GIC
> + select BUILDTIME_EXTABLE_SORT
> select CLONE_BACKWARDS
> select COMMON_CLK
> select GENERIC_CLOCKEVENTS
> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index 3fae2be..3d527726 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -57,6 +57,13 @@ SECTIONS
>
> RO_DATA(PAGE_SIZE)
>
> + . = ALIGN(8);
> + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
> + __start___ex_table = .;
> + *(__ex_table)
> + __stop___ex_table = .;
> + }
Here you could use:
EXCEPTION_TABLE(align)
(From include/asm-generic/vmlinux.lds.h)
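For reference, the generic helper is roughly the following (a sketch of
what include/asm-generic/vmlinux.lds.h currently has, modulo the exact
VMLINUX_SYMBOL() wrapping; please double-check the definition in your
tree):

    #define EXCEPTION_TABLE(align)                                    \
            . = ALIGN(align);                                         \
            __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {         \
                    VMLINUX_SYMBOL(__start___ex_table) = .;           \
                    *(__ex_table)                                     \
                    VMLINUX_SYMBOL(__stop___ex_table) = .;            \
            }

so EXCEPTION_TABLE(8) should give you exactly the section you are
open-coding above.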
Sam

Hi David,
On Fri, May 17, 2013 at 07:33:53PM +0100, David Daney wrote:
> On 05/17/2013 09:43 AM, Will Deacon wrote:
> > As is done for other architectures, sort the exception table at
> > build-time rather than during boot.
> >
> > Since sortextable appears to be a standalone C program relying on the
> > host elf.h to provide EM_AARCH64, I've had to add a conditional check in
> > order to allow cross-compilation on machines that aren't running a
> > bleeding-edge libc-dev.
> >
> > Signed-off-by: Will Deacon <[email protected]>
> > ---
> > arch/arm64/Kconfig | 1 +
> > arch/arm64/kernel/vmlinux.lds.S | 15 +++++++--------
> > scripts/sortextable.c | 5 +++++
> > 3 files changed, 13 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> > index 48347dc..fb8fd90 100644
> > --- a/arch/arm64/Kconfig
> > +++ b/arch/arm64/Kconfig
> > @@ -7,6 +7,7 @@ config ARM64
> > select ARM_AMBA
> > select ARM_ARCH_TIMER
> > select ARM_GIC
> > + select BUILDTIME_EXTABLE_SORT
> > select CLONE_BACKWARDS
> > select COMMON_CLK
> > select GENERIC_CLOCKEVENTS
> > diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> > index 3fae2be..3d527726 100644
> > --- a/arch/arm64/kernel/vmlinux.lds.S
> > +++ b/arch/arm64/kernel/vmlinux.lds.S
> > @@ -57,6 +57,13 @@ SECTIONS
> >
> > RO_DATA(PAGE_SIZE)
> >
> > + . = ALIGN(8);
> > + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
> > + __start___ex_table = .;
> > + *(__ex_table)
> > + __stop___ex_table = .;
> > + }
> > +
> > _etext = .; /* End of text and rodata section */
>
>
> You have moved this to rodata.
I don't think I have moved this to rodata. The RO_DATA macro expands to a
bunch of sections, but it doesn't leave any of them open at the end.
> o Will the runtime sort still work like this?
>
> o Do we care? Maybe not.
For arm64 we'd be moving exclusively to build-time sorting, so I guess we
*could* stick the thing in rodata if we wanted to. Not sure it's worth the
linker script munging though.
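To illustrate the point, RO_DATA(PAGE_SIZE) expands (roughly, and heavily
abbreviated; see include/asm-generic/vmlinux.lds.h for the real thing) to a
series of self-contained output sections:

    . = ALIGN(PAGE_SIZE);
    .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {
            *(.rodata) *(.rodata.*)
            ...
    }
    /* __ksymtab*, __param, etc., each opened and closed in turn */
    . = ALIGN(PAGE_SIZE);

so the new __ex_table output section that follows it is a sibling of
.rodata rather than part of it.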
Will

On Fri, May 17, 2013 at 07:49:50PM +0100, Sam Ravnborg wrote:
> On Fri, May 17, 2013 at 05:43:41PM +0100, Will Deacon wrote:
> > + . = ALIGN(8);
> > + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
> > + __start___ex_table = .;
> > + *(__ex_table)
> > + __stop___ex_table = .;
> > + }
>
> Here you could use:
> EXCEPTION_TABLE(align)
>
> (From include/asm-generic/vmlinux.lds.h)
Good spot, I'll spin a v2 using that. Looks like we don't use it for
arch/arm/ because we want to discard the section when CONFIG_MMU is not
set. Maybe we could change the generic macro to expand to nothing if !MMU,
but I'm not sure whether other MMU-less architectures are playing tricks
with this.
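For the record, the v2 delta on top of this patch would be something like
the following (untested sketch, keeping the 8-byte alignment from above):

    -       . = ALIGN(8);
    -       __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
    -               __start___ex_table = .;
    -               *(__ex_table)
    -               __stop___ex_table = .;
    -       }
    +       EXCEPTION_TABLE(8)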
Will