This patch series cleans up the section names on the ia64
architecture. It requires the architecture-independent macro
definitions from this patch series:
<http://www.spinics.net/lists/mips/msg33499.html>
The long-term goal here is to add support for building the kernel with
-ffunction-sections -fdata-sections. This requires renaming all of the
kernel's magic sections with names of the form .text.foo, .data.foo,
.bss.foo, or .rodata.foo so that they do not collide with the sections
the compiler generates for ordinary code such as:
static int nosave = 1;       /* -fdata-sections places this in .data.nosave */
static void head(void) { }   /* -ffunction-sections places this in .text.head */
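As a stand-alone illustration (not part of any patch in this series; the
file name and symbols below are made up), the collision can be reproduced
with an ordinary compiler and inspected with readelf:
/* collide.c -- build with:  gcc -c -ffunction-sections -fdata-sections collide.c
 * then inspect with:        readelf -S collide.o
 *
 * With those flags the compiler places each function in .text.<name> and
 * each writable initialized object in .data.<name>, so these perfectly
 * ordinary identifiers land in sections that today's kernel linker scripts
 * treat as special-purpose input sections.  (Note the non-zero initializer:
 * a zero-initialized static would land in .bss.nosave instead.)
 */
static int nosave = 1;		/* emitted as .data.nosave */
static void head(void)		/* emitted as .text.head */
{
	nosave++;
}
int main(void)
{
	head();
	return nosave;
}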
Note that these patches have not been boot-tested (aside from testing
the analogous changes on x86), since I don't have access to the
appropriate hardware.
-Tim Abbott
Tim Abbott (5):
ia64: Use .ref.text, not .text.head, for start_ap.
ia64: use new macro for .data.cacheline_aligned section.
ia64: use new macros for .data.init_task.
ia64: use new macro for .data.read_mostly section.
ia64: convert to new generic read_mostly support.
arch/ia64/Kconfig | 3 +++
arch/ia64/include/asm/cache.h | 2 --
arch/ia64/kernel/head.S | 2 +-
arch/ia64/kernel/init_task.c | 2 +-
arch/ia64/kernel/vmlinux.lds.S | 15 +++------------
arch/ia64/xen/xensetup.S | 3 ++-
6 files changed, 10 insertions(+), 17 deletions(-)
.data.cacheline_aligned should not need a separate output section;
this change moves it into the .data section.
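For context (nothing below is part of the patch): the input section being
collected here is populated by the __cacheline_aligned annotation; the
definition shown is roughly what include/linux/cache.h provides at this
point, and the variable is a hypothetical example of typical usage.
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) from the dependency series is
expected to do no more than align to a cache line and pull
*(.data.cacheline_aligned) into the surrounding .data output section.
/* Roughly the current definition from include/linux/cache.h: align the
 * object to the cache line size and place it in .data.cacheline_aligned. */
#define __cacheline_aligned						\
	__attribute__((__aligned__(SMP_CACHE_BYTES),			\
		       __section__(".data.cacheline_aligned")))
/* Hypothetical usage: give a frequently written structure its own cache
 * line so it does not false-share with neighboring data. */
static struct hot_counters {
	unsigned long hits;
	unsigned long misses;
} hot_counters __cacheline_aligned;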
Signed-off-by: Tim Abbott <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
---
arch/ia64/kernel/vmlinux.lds.S | 4 +---
1 files changed, 1 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index fa3a558..cdcd655 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -237,9 +237,6 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
{ *(.data.read_mostly) }
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
- { *(.data.cacheline_aligned) }
-
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
PERCPU_VADDR(PERCPU_ADDR, :percpu)
@@ -256,6 +253,7 @@ SECTIONS
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
+ CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
DATA_DATA
*(.data1)
*(.gnu.linkonce.d*)
--
1.6.2.1
Convert ia64 to the generic read_mostly support: enable
HAVE_READ_MOSTLY_DATA in Kconfig, drop the ia64-private __read_mostly
definition from asm/cache.h, and switch xensetup.S to the __READ_MOSTLY
macro from <linux/cache.h> instead of naming the .data.read_mostly
section directly.
Signed-off-by: Tim Abbott <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
---
arch/ia64/Kconfig | 3 +++
arch/ia64/include/asm/cache.h | 2 --
arch/ia64/xen/xensetup.S | 3 ++-
3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 294a3b1..1e2cd63 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -91,6 +91,9 @@ config GENERIC_TIME_VSYSCALL
config HAVE_SETUP_PER_CPU_AREA
def_bool y
+config HAVE_READ_MOSTLY_DATA
+ def_bool y
+
config DMI
bool
default y
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index e7482bd..8f72e1b 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,4 @@
# define SMP_CACHE_BYTES (1 << 3)
#endif
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
#endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
index 28fed1f..8781561 100644
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -10,11 +10,12 @@
#include <asm/system.h>
#include <asm/paravirt.h>
#include <asm/xen/privop.h>
+#include <linux/cache.h>
#include <linux/elfnote.h>
#include <linux/init.h>
#include <xen/interface/elfnote.h>
- .section .data.read_mostly
+ __READ_MOSTLY
.align 8
.global xen_domain_type
xen_domain_type:
--
1.6.2.1
It seems that start_ap doesn't need to be in a special location in the
kernel, but it references some init code, so it should be in .ref.text.
Since this is the only thing in the .text.head section, eliminate
.text.head from the linker script.
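For reference, __REF is the existing helper from include/linux/init.h
(nothing about it changes here); it simply switches the assembler into
.ref.text, which the .text output section already collects via the generic
TEXT_TEXT macro and which modpost treats as allowed to reference __init
code:
/* From include/linux/init.h: code that legitimately references __init
 * code or data goes in .ref.text; it is linked with the normal kernel
 * text but is exempt from modpost's section-mismatch warnings. */
#define __REF            .section       ".ref.text", "ax"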
Signed-off-by: Tim Abbott <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
---
arch/ia64/kernel/head.S | 2 +-
arch/ia64/kernel/vmlinux.lds.S | 2 --
2 files changed, 1 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 23f846d..8239ff6 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -181,7 +181,7 @@ swapper_pg_dir:
halt_msg:
stringz "Halting kernel\n"
- .section .text.head,"ax"
+ __REF
.global start_ap
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86..fa3a558 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -51,8 +51,6 @@ SECTIONS
KPROBES_TEXT
*(.gnu.linkonce.t*)
}
- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
- { *(.text.head) }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
--
1.6.2.1
.data.init_task should not need a separate output section; this change
moves it into the .data section.
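For context: __init_task_data and INIT_TASK_DATA() come from the generic
macro series referenced in the cover letter.  The expectation is simply
that the attribute tags init_task_mem for the .data.init_task input
section and that the linker-script macro aligns and collects that section
inside .data; a sketch of their likely shape (not the authoritative
definitions) is:
/* C side: tag the object for the .data.init_task input section. */
#define __init_task_data \
	__attribute__((__section__(".data.init_task")))
/* Linker-script side (include/asm-generic/vmlinux.lds.h), roughly: align
 * to the requested boundary and collect the input section in place. */
#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data.init_task)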
Signed-off-by: Tim Abbott <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
---
arch/ia64/kernel/init_task.c | 2 +-
arch/ia64/kernel/vmlinux.lds.S | 5 +----
2 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 5b0e830..8b7bbed 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -37,7 +37,7 @@ union {
struct thread_info thread_info;
} s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task") __init_task_data = {{
.task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index cdcd655..9c4cc29 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -214,10 +214,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
- /* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
- { *(.data.init_task) }
-
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
@@ -253,6 +249,7 @@ SECTIONS
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
+ INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
DATA_DATA
*(.data1)
--
1.6.2.1
As with .data.cacheline_aligned, I eliminated the separate output
section and set the alignment to SMP_CACHE_BYTES. An ack from Tony
would be appreciated.
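For context: __read_mostly is only a section attribute (the ia64-private
definition lives in arch/ia64/include/asm/cache.h until the generic
conversion patch in this series removes it), and grouping read-mostly
variables keeps them off the cache lines of frequently written data.
READ_MOSTLY_DATA(SMP_CACHE_BYTES) is expected to align and collect
*(.data.read_mostly) inside .data, mirroring the cacheline_aligned
change.  A typical user would look like this (hypothetical variable):
/* Hypothetical example: a flag written once at boot and then only read
 * on hot paths; __read_mostly places it in .data.read_mostly so that it
 * is packed with other rarely written data. */
static int my_feature_enabled __read_mostly = 1;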
Signed-off-by: Tim Abbott <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
---
arch/ia64/kernel/vmlinux.lds.S | 4 +---
1 files changed, 1 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 9c4cc29..e99faad 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -230,9 +230,6 @@ SECTIONS
* kernel data
*/
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
- { *(.data.read_mostly) }
-
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
PERCPU_VADDR(PERCPU_ADDR, :percpu)
@@ -251,6 +248,7 @@ SECTIONS
#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
+ READ_MOSTLY_DATA(SMP_CACHE_BYTES)
DATA_DATA
*(.data1)
*(.gnu.linkonce.d*)
--
1.6.2.1