Some toolchains use another character as the newline marker in a macro
(e.g. arc uses '`'), so generic assembly code needs to use ASM_NL (a
macro) instead of ';' for this purpose.
Since "linux/export.h" is mainly used for export-related work, our fix
more likely belongs in "linux/linkage.h", so we add the related check
to "linkage.h".
Also take care of the 80-column wrap, and use '\t' for each line.
Signed-off-by: Chen Gang <[email protected]>
---
arch/arc/include/asm/linkage.h | 2 +
include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
include/linux/linkage.h | 19 ++-
3 files changed, 190 insertions(+), 181 deletions(-)
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e..66ee552 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -11,6 +11,8 @@
#ifdef __ASSEMBLY__
+#define ASM_NL ` /* use '`' to mark new line in macro */
+
/* Can't use the ENTRY macro in linux/linkage.h
* gas considers ';' as comment vs. newline
*/
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bc2121f..0ca99a9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -10,28 +10,28 @@
* ENTRY(...)
* SECTIONS
* {
- * . = START;
- * __init_begin = .;
+ * . = START
+ * __init_begin = .
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
* PERCPU_SECTION(CACHELINE_SIZE)
- * __init_end = .;
+ * __init_end = .
*
- * _stext = .;
+ * _stext = .
* TEXT_SECTION = 0
- * _etext = .;
+ * _etext = .
*
- * _sdata = .;
+ * _sdata = .
* RO_DATA_SECTION(PAGE_SIZE)
* RW_DATA_SECTION(...)
- * _edata = .;
+ * _edata = .
*
* EXCEPTION_TABLE(...)
* NOTES
*
* BSS_SECTION(0, 0, 0)
- * _end = .;
+ * _end = .
*
* STABS_DEBUG
* DWARF_DEBUG
@@ -52,7 +52,7 @@
#define LOAD_OFFSET 0
#endif
-#include <linux/export.h>
+#include <linux/linkage.h>
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
@@ -85,63 +85,65 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-#define MCOUNT_REC() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
- VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#define MCOUNT_REC() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start_mcount_loc) = . ASM_NL \
+ *(__mcount_loc) \
+ VMLINUX_SYMBOL(__stop_mcount_loc) = . ASM_NL
#else
#define MCOUNT_REC()
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
- VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#define LIKELY_PROFILE() \
+ VMLINUX_SYMBOL(__start_annotated_branch_profile) = . ASM_NL \
+ *(_ftrace_annotated_branch) \
+ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = . ASM_NL
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
- VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#define BRANCH_PROFILE() \
+ VMLINUX_SYMBOL(__start_branch_profile) = . ASM_NL \
+ *(_ftrace_branch) \
+ VMLINUX_SYMBOL(__stop_branch_profile) = . ASM_NL
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#define FTRACE_EVENTS() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start_ftrace_events) = . ASM_NL \
+ *(_ftrace_events) \
+ VMLINUX_SYMBOL(__stop_ftrace_events) = . ASM_NL
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = . ASM_NL \
*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
-#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
- *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
- VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
+ VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = . ASM_NL
+#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = . ASM_NL \
+ *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
+ VMLINUX_SYMBOL(__stop___tracepoint_str) = . ASM_NL
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
- VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+#define TRACE_SYSCALLS() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start_syscalls_metadata) = . ASM_NL \
+ *(__syscalls_metadata) \
+ VMLINUX_SYMBOL(__stop_syscalls_metadata) = . ASM_NL
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_CLKSRC_OF
-#define CLKSRC_OF_TABLES() . = ALIGN(8); \
- VMLINUX_SYMBOL(__clksrc_of_table) = .; \
+#define CLKSRC_OF_TABLES() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__clksrc_of_table) = . ASM_NL \
*(__clksrc_of_table) \
*(__clksrc_of_table_end)
#else
@@ -150,8 +152,8 @@
#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE() \
- . = ALIGN(8); \
- VMLINUX_SYMBOL(__irqchip_begin) = .; \
+ . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__irqchip_begin) = . ASM_NL \
*(__irqchip_of_table) \
*(__irqchip_of_end)
#else
@@ -159,19 +161,19 @@
#endif
#ifdef CONFIG_COMMON_CLK
-#define CLK_OF_TABLES() . = ALIGN(8); \
- VMLINUX_SYMBOL(__clk_of_table) = .; \
- *(__clk_of_table) \
+#define CLK_OF_TABLES() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__clk_of_table) = . ASM_NL \
+ *(__clk_of_table) \
*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif
#define KERNEL_DTB() \
- STRUCT_ALIGN(); \
- VMLINUX_SYMBOL(__dtb_start) = .; \
+ STRUCT_ALIGN() ASM_NL \
+ VMLINUX_SYMBOL(__dtb_start) = . ASM_NL \
*(.dtb.init.rodata) \
- VMLINUX_SYMBOL(__dtb_end) = .;
+ VMLINUX_SYMBOL(__dtb_end) = . ASM_NL
/* .data section */
#define DATA_DATA \
@@ -181,17 +183,17 @@
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
*(.data.unlikely) \
- STRUCT_ALIGN(); \
+ STRUCT_ALIGN() ASM_NL \
*(__tracepoints) \
/* implement dynamic printk debug */ \
- . = ALIGN(8); \
- VMLINUX_SYMBOL(__start___jump_table) = .; \
+ . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start___jump_table) = . ASM_NL \
*(__jump_table) \
- VMLINUX_SYMBOL(__stop___jump_table) = .; \
- . = ALIGN(8); \
- VMLINUX_SYMBOL(__start___verbose) = .; \
+ VMLINUX_SYMBOL(__stop___jump_table) = . ASM_NL \
+ . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start___verbose) = . ASM_NL \
*(__verbose) \
- VMLINUX_SYMBOL(__stop___verbose) = .; \
+ VMLINUX_SYMBOL(__stop___verbose) = . ASM_NL \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -201,42 +203,42 @@
* Data section helpers
*/
#define NOSAVE_DATA \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_begin) = .; \
+ . = ALIGN(PAGE_SIZE) ASM_NL \
+ VMLINUX_SYMBOL(__nosave_begin) = . ASM_NL \
*(.data..nosave) \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__nosave_end) = .;
+ . = ALIGN(PAGE_SIZE) ASM_NL \
+ VMLINUX_SYMBOL(__nosave_end) = . ASM_NL
#define PAGE_ALIGNED_DATA(page_align) \
- . = ALIGN(page_align); \
+ . = ALIGN(page_align) ASM_NL \
*(.data..page_aligned)
#define READ_MOSTLY_DATA(align) \
- . = ALIGN(align); \
+ . = ALIGN(align) ASM_NL \
*(.data..read_mostly) \
- . = ALIGN(align);
+ . = ALIGN(align) ASM_NL
#define CACHELINE_ALIGNED_DATA(align) \
- . = ALIGN(align); \
+ . = ALIGN(align) ASM_NL \
*(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
- . = ALIGN(align); \
+ . = ALIGN(align) ASM_NL \
*(.data..init_task)
/*
* Read only Data
*/
#define RO_DATA_SECTION(align) \
- . = ALIGN((align)); \
+ . = ALIGN((align)) ASM_NL \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rodata) = .; \
+ VMLINUX_SYMBOL(__start_rodata) = . ASM_NL \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
- . = ALIGN(8); \
- VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+ . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = . ASM_NL \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
- VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
+ VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = . ASM_NL \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -248,106 +250,106 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_early) = . ASM_NL \
*(.pci_fixup_early) \
- VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_early) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_header) = . ASM_NL \
*(.pci_fixup_header) \
- VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_header) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_final) = . ASM_NL \
*(.pci_fixup_final) \
- VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_final) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_enable) = . ASM_NL \
*(.pci_fixup_enable) \
- VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_enable) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume) = . ASM_NL \
*(.pci_fixup_resume) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = . ASM_NL \
*(.pci_fixup_resume_early) \
- VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
- VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = . ASM_NL \
+ VMLINUX_SYMBOL(__start_pci_fixups_suspend) = . ASM_NL \
*(.pci_fixup_suspend) \
- VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
+ VMLINUX_SYMBOL(__end_pci_fixups_suspend) = . ASM_NL \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+ VMLINUX_SYMBOL(__start_builtin_fw) = . ASM_NL \
*(.builtin_fw) \
- VMLINUX_SYMBOL(__end_builtin_fw) = .; \
+ VMLINUX_SYMBOL(__end_builtin_fw) = . ASM_NL \
} \
\
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab) = .; \
+ VMLINUX_SYMBOL(__start___ksymtab) = . ASM_NL \
*(SORT(___ksymtab+*)) \
- VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ VMLINUX_SYMBOL(__stop___ksymtab) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = . ASM_NL \
*(SORT(___ksymtab_gpl+*)) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = . ASM_NL \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+ VMLINUX_SYMBOL(__start___ksymtab_unused) = . ASM_NL \
*(SORT(___ksymtab_unused+*)) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = . ASM_NL \
*(SORT(___ksymtab_unused_gpl+*)) \
- VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = . ASM_NL \
*(SORT(___ksymtab_gpl_future+*)) \
- VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = . ASM_NL \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab) = .; \
+ VMLINUX_SYMBOL(__start___kcrctab) = . ASM_NL \
*(SORT(___kcrctab+*)) \
- VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ VMLINUX_SYMBOL(__stop___kcrctab) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl) = . ASM_NL \
*(SORT(___kcrctab_gpl+*)) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = . ASM_NL \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
+ VMLINUX_SYMBOL(__start___kcrctab_unused) = . ASM_NL \
*(SORT(___kcrctab_unused+*)) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = . ASM_NL \
*(SORT(___kcrctab_unused_gpl+*)) \
- VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = . ASM_NL \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = . ASM_NL \
*(SORT(___kcrctab_gpl_future+*)) \
- VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = . ASM_NL \
} \
\
/* Kernel symbol table: strings */ \
@@ -364,20 +366,20 @@
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___param) = .; \
+ VMLINUX_SYMBOL(__start___param) = . ASM_NL \
*(__param) \
- VMLINUX_SYMBOL(__stop___param) = .; \
+ VMLINUX_SYMBOL(__stop___param) = . ASM_NL \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___modver) = .; \
+ VMLINUX_SYMBOL(__start___modver) = . ASM_NL \
*(__modver) \
- VMLINUX_SYMBOL(__stop___modver) = .; \
- . = ALIGN((align)); \
- VMLINUX_SYMBOL(__end_rodata) = .; \
+ VMLINUX_SYMBOL(__stop___modver) = . ASM_NL \
+ . = ALIGN((align)) ASM_NL \
+ VMLINUX_SYMBOL(__end_rodata) = . ASM_NL \
} \
- . = ALIGN((align));
+ . = ALIGN((align)) ASM_NL
/* RODATA & RO_DATA provided for backward compatibility.
* All archs are supposed to use RO_DATA() */
@@ -386,15 +388,15 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ VMLINUX_SYMBOL(__security_initcall_start) = . ASM_NL \
*(.security_initcall.init) \
- VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ VMLINUX_SYMBOL(__security_initcall_end) = . ASM_NL \
}
/* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map */
#define TEXT_TEXT \
- ALIGN_FUNCTION(); \
+ ALIGN_FUNCTION() ASM_NL \
*(.text.hot) \
*(.text) \
*(.ref.text) \
@@ -406,37 +408,37 @@
/* sched.text is aling to function alignment to secure we have same
* address even at second ld pass when generating System.map */
#define SCHED_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__sched_text_start) = .; \
+ ALIGN_FUNCTION() ASM_NL \
+ VMLINUX_SYMBOL(__sched_text_start) = . ASM_NL \
*(.sched.text) \
- VMLINUX_SYMBOL(__sched_text_end) = .;
+ VMLINUX_SYMBOL(__sched_text_end) = . ASM_NL
/* spinlock.text is aling to function alignment to secure we have same
* address even at second ld pass when generating System.map */
#define LOCK_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__lock_text_start) = .; \
+ ALIGN_FUNCTION() ASM_NL \
+ VMLINUX_SYMBOL(__lock_text_start) = . ASM_NL \
*(.spinlock.text) \
- VMLINUX_SYMBOL(__lock_text_end) = .;
+ VMLINUX_SYMBOL(__lock_text_end) = . ASM_NL
#define KPROBES_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__kprobes_text_start) = .; \
+ ALIGN_FUNCTION() ASM_NL \
+ VMLINUX_SYMBOL(__kprobes_text_start) = . ASM_NL \
*(.kprobes.text) \
- VMLINUX_SYMBOL(__kprobes_text_end) = .;
+ VMLINUX_SYMBOL(__kprobes_text_end) = . ASM_NL
#define ENTRY_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__entry_text_start) = .; \
+ ALIGN_FUNCTION() ASM_NL \
+ VMLINUX_SYMBOL(__entry_text_start) = . ASM_NL \
*(.entry.text) \
- VMLINUX_SYMBOL(__entry_text_end) = .;
+ VMLINUX_SYMBOL(__entry_text_end) = . ASM_NL
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ ALIGN_FUNCTION() ASM_NL \
+ VMLINUX_SYMBOL(__irqentry_text_start) = . ASM_NL \
*(.irqentry.text) \
- VMLINUX_SYMBOL(__irqentry_text_end) = .;
+ VMLINUX_SYMBOL(__irqentry_text_end) = . ASM_NL
#else
#define IRQENTRY_TEXT
#endif
@@ -444,7 +446,7 @@
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
-#define HEAD_TEXT_SECTION \
+#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
HEAD_TEXT \
}
@@ -453,28 +455,28 @@
* Exception table
*/
#define EXCEPTION_TABLE(align) \
- . = ALIGN(align); \
+ . = ALIGN(align) ASM_NL \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___ex_table) = .; \
+ VMLINUX_SYMBOL(__start___ex_table) = . ASM_NL \
*(__ex_table) \
- VMLINUX_SYMBOL(__stop___ex_table) = .; \
+ VMLINUX_SYMBOL(__stop___ex_table) = . ASM_NL \
}
/*
* Init task
*/
#define INIT_TASK_DATA_SECTION(align) \
- . = ALIGN(align); \
+ . = ALIGN(align) ASM_NL \
.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
INIT_TASK_DATA(align) \
}
#ifdef CONFIG_CONSTRUCTORS
-#define KERNEL_CTORS() . = ALIGN(8); \
- VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
- *(.init_array) \
- VMLINUX_SYMBOL(__ctors_end) = .;
+#define KERNEL_CTORS() . = ALIGN(8) ASM_NL \
+ VMLINUX_SYMBOL(__ctors_start) = . ASM_NL \
+ *(.ctors) \
+ *(.init_array) \
+ VMLINUX_SYMBOL(__ctors_end) = . ASM_NL
#else
#define KERNEL_CTORS()
#endif
@@ -515,7 +517,7 @@
* zeroed during startup
*/
#define SBSS(sbss_align) \
- . = ALIGN(sbss_align); \
+ . = ALIGN(sbss_align) ASM_NL \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
*(.sbss) \
*(.scommon) \
@@ -530,7 +532,7 @@
#endif
#define BSS(bss_align) \
- . = ALIGN(bss_align); \
+ . = ALIGN(bss_align) ASM_NL \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
@@ -581,11 +583,11 @@
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
- . = ALIGN(8); \
+ . = ALIGN(8) ASM_NL \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___bug_table) = .; \
+ VMLINUX_SYMBOL(__start___bug_table) = . ASM_NL \
*(__bug_table) \
- VMLINUX_SYMBOL(__stop___bug_table) = .; \
+ VMLINUX_SYMBOL(__stop___bug_table) = . ASM_NL \
}
#else
#define BUG_TABLE
@@ -593,11 +595,11 @@
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
- . = ALIGN(4); \
+ . = ALIGN(4) ASM_NL \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__tracedata_start) = .; \
+ VMLINUX_SYMBOL(__tracedata_start) = . ASM_NL \
*(.tracedata) \
- VMLINUX_SYMBOL(__tracedata_end) = .; \
+ VMLINUX_SYMBOL(__tracedata_end) = . ASM_NL \
}
#else
#define TRACEDATA
@@ -605,24 +607,24 @@
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_notes) = .; \
+ VMLINUX_SYMBOL(__start_notes) = . ASM_NL \
*(.note.*) \
- VMLINUX_SYMBOL(__stop_notes) = .; \
+ VMLINUX_SYMBOL(__stop_notes) = . ASM_NL \
}
#define INIT_SETUP(initsetup_align) \
- . = ALIGN(initsetup_align); \
- VMLINUX_SYMBOL(__setup_start) = .; \
+ . = ALIGN(initsetup_align) ASM_NL \
+ VMLINUX_SYMBOL(__setup_start) = . ASM_NL \
*(.init.setup) \
- VMLINUX_SYMBOL(__setup_end) = .;
+ VMLINUX_SYMBOL(__setup_end) = . ASM_NL
#define INIT_CALLS_LEVEL(level) \
- VMLINUX_SYMBOL(__initcall##level##_start) = .; \
+ VMLINUX_SYMBOL(__initcall##level##_start) = . ASM_NL \
*(.initcall##level##.init) \
*(.initcall##level##s.init) \
#define INIT_CALLS \
- VMLINUX_SYMBOL(__initcall_start) = .; \
+ VMLINUX_SYMBOL(__initcall_start) = . ASM_NL \
*(.initcallearly.init) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
@@ -633,24 +635,24 @@
INIT_CALLS_LEVEL(rootfs) \
INIT_CALLS_LEVEL(6) \
INIT_CALLS_LEVEL(7) \
- VMLINUX_SYMBOL(__initcall_end) = .;
+ VMLINUX_SYMBOL(__initcall_end) = . ASM_NL
#define CON_INITCALL \
- VMLINUX_SYMBOL(__con_initcall_start) = .; \
+ VMLINUX_SYMBOL(__con_initcall_start) = . ASM_NL \
*(.con_initcall.init) \
- VMLINUX_SYMBOL(__con_initcall_end) = .;
+ VMLINUX_SYMBOL(__con_initcall_end) = . ASM_NL
#define SECURITY_INITCALL \
- VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ VMLINUX_SYMBOL(__security_initcall_start) = . ASM_NL \
*(.security_initcall.init) \
- VMLINUX_SYMBOL(__security_initcall_end) = .;
+ VMLINUX_SYMBOL(__security_initcall_end) = . ASM_NL
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__initramfs_start) = .; \
+ . = ALIGN(4) ASM_NL \
+ VMLINUX_SYMBOL(__initramfs_start) = . ASM_NL \
*(.init.ramfs) \
- . = ALIGN(8); \
+ . = ALIGN(8) ASM_NL \
*(.init.ramfs.info)
#else
#define INIT_RAM_FS
@@ -685,16 +687,16 @@
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ VMLINUX_SYMBOL(__per_cpu_start) = . ASM_NL \
*(.data..percpu..first) \
- . = ALIGN(PAGE_SIZE); \
+ . = ALIGN(PAGE_SIZE) ASM_NL \
*(.data..percpu..page_aligned) \
- . = ALIGN(cacheline); \
+ . = ALIGN(cacheline) ASM_NL \
*(.data..percpu..readmostly) \
- . = ALIGN(cacheline); \
+ . = ALIGN(cacheline) ASM_NL \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
- VMLINUX_SYMBOL(__per_cpu_end) = .;
+ VMLINUX_SYMBOL(__per_cpu_end) = . ASM_NL
/**
* PERCPU_VADDR - define output section for percpu area
@@ -721,12 +723,12 @@
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ VMLINUX_SYMBOL(__per_cpu_load) = . ASM_NL \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
+ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu) ASM_NL
/**
* PERCPU_SECTION - define output section for percpu area, simple version
@@ -741,9 +743,9 @@
* .data..percpu which is required for relocatable x86_32 configuration.
*/
#define PERCPU_SECTION(cacheline) \
- . = ALIGN(PAGE_SIZE); \
+ . = ALIGN(PAGE_SIZE) ASM_NL \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ VMLINUX_SYMBOL(__per_cpu_load) = . ASM_NL \
PERCPU_INPUT(cacheline) \
}
@@ -767,7 +769,7 @@
*
* use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
- . = ALIGN(PAGE_SIZE); \
+ . = ALIGN(PAGE_SIZE) ASM_NL \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
INIT_TASK_DATA(inittask) \
NOSAVE_DATA \
@@ -779,11 +781,11 @@
}
#define INIT_TEXT_SECTION(inittext_align) \
- . = ALIGN(inittext_align); \
+ . = ALIGN(inittext_align) ASM_NL \
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(_sinittext) = .; \
+ VMLINUX_SYMBOL(_sinittext) = . ASM_NL \
INIT_TEXT \
- VMLINUX_SYMBOL(_einittext) = .; \
+ VMLINUX_SYMBOL(_einittext) = . ASM_NL \
}
#define INIT_DATA_SECTION(initsetup_align) \
@@ -797,9 +799,9 @@
}
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
- . = ALIGN(sbss_align); \
- VMLINUX_SYMBOL(__bss_start) = .; \
+ . = ALIGN(sbss_align) ASM_NL \
+ VMLINUX_SYMBOL(__bss_start) = . ASM_NL \
SBSS(sbss_align) \
BSS(bss_align) \
- . = ALIGN(stop_align); \
- VMLINUX_SYMBOL(__bss_stop) = .;
+ . = ALIGN(stop_align) ASM_NL \
+ VMLINUX_SYMBOL(__bss_stop) = . ASM_NL
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d3e8ad2..a6a42dd 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -6,6 +6,11 @@
#include <linux/export.h>
#include <asm/linkage.h>
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL ;
+#endif
+
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
@@ -75,21 +80,21 @@
#ifndef ENTRY
#define ENTRY(name) \
- .globl name; \
- ALIGN; \
- name:
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name:
#endif
#endif /* LINKER_SCRIPT */
#ifndef WEAK
#define WEAK(name) \
- .weak name; \
+ .weak name ASM_NL \
name:
#endif
#ifndef END
#define END(name) \
- .size name, .-name
+ .size name, .-name
#endif
/* If symbol 'name' is treated as a subroutine (gets called, and returns)
@@ -98,8 +103,8 @@
*/
#ifndef ENDPROC
#define ENDPROC(name) \
- .type name, @function; \
- END(name)
+ .type name, @function ASM_NL \
+ END(name)
#endif
#endif
--
1.7.7.6
Hi Chen,
On Mon, Oct 28, 2013 at 2:30 PM, Chen Gang <[email protected]> wrote:
> For some toolchains, they use another character as newline in a macro
> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
> macro) instead of ';' for it.
> Since "linux/export.h" are mainly used for exporting work, then our fix
> more likely belongs to "linux/linkage.h", and we need add the related
> checking in "linkage.h".
>
> Also need notice 80 columns wrap, and '\t' for each line.
>
>
> Signed-off-by: Chen Gang <[email protected]>
> ---
> arch/arc/include/asm/linkage.h | 2 +
> include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
Actually, vmlinux.lds.h is not assembly code, but rather a linker script,
which is later preprocessed by cpp.
> include/linux/linkage.h | 19 ++-
> 3 files changed, 190 insertions(+), 181 deletions(-)
>
> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
> index 0283e9e..66ee552 100644
> --- a/arch/arc/include/asm/linkage.h
> +++ b/arch/arc/include/asm/linkage.h
> @@ -11,6 +11,8 @@
>
> #ifdef __ASSEMBLY__
>
> +#define ASM_NL ` /* use '`' to mark new line in macro */
> +
> /* Can't use the ENTRY macro in linux/linkage.h
> * gas considers ';' as comment vs. newline
> */
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index bc2121f..0ca99a9 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -10,28 +10,28 @@
> * ENTRY(...)
> * SECTIONS
> * {
> - * . = START;
> - * __init_begin = .;
> + * . = START
> + * __init_begin = .
This doesn't look correct: these are simple assignments (inside a
comment) in a linker script, and the description of simple assignments
in 'info ld' says that a semicolon after the expression is required.
The same applies to all the following removals of ';' and replacements
of ';' with ASM_NL in this file.
> * HEAD_TEXT_SECTION
> * INIT_TEXT_SECTION(PAGE_SIZE)
> * INIT_DATA_SECTION(...)
> * PERCPU_SECTION(CACHELINE_SIZE)
> - * __init_end = .;
> + * __init_end = .
> *
> - * _stext = .;
> + * _stext = .
> * TEXT_SECTION = 0
> - * _etext = .;
> + * _etext = .
> *
> - * _sdata = .;
> + * _sdata = .
> * RO_DATA_SECTION(PAGE_SIZE)
> * RW_DATA_SECTION(...)
> - * _edata = .;
> + * _edata = .
> *
> * EXCEPTION_TABLE(...)
> * NOTES
> *
> * BSS_SECTION(0, 0, 0)
> - * _end = .;
> + * _end = .
> *
> * STABS_DEBUG
> * DWARF_DEBUG
> @@ -52,7 +52,7 @@
> #define LOAD_OFFSET 0
> #endif
>
> -#include <linux/export.h>
> +#include <linux/linkage.h>
>
> /* Align . to a 8 byte boundary equals to maximum function alignment. */
> #define ALIGN_FUNCTION() . = ALIGN(8)
> @@ -85,63 +85,65 @@
> #endif
>
> #ifdef CONFIG_FTRACE_MCOUNT_RECORD
> -#define MCOUNT_REC() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start_mcount_loc) = .; \
> - *(__mcount_loc) \
> - VMLINUX_SYMBOL(__stop_mcount_loc) = .;
> +#define MCOUNT_REC() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start_mcount_loc) = . ASM_NL \
> + *(__mcount_loc) \
> + VMLINUX_SYMBOL(__stop_mcount_loc) = . ASM_NL
> #else
> #define MCOUNT_REC()
> #endif
>
> #ifdef CONFIG_TRACE_BRANCH_PROFILING
> -#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
> - *(_ftrace_annotated_branch) \
> - VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
> +#define LIKELY_PROFILE() \
> + VMLINUX_SYMBOL(__start_annotated_branch_profile) = . ASM_NL \
> + *(_ftrace_annotated_branch) \
> + VMLINUX_SYMBOL(__stop_annotated_branch_profile) = . ASM_NL
> #else
> #define LIKELY_PROFILE()
> #endif
>
> #ifdef CONFIG_PROFILE_ALL_BRANCHES
> -#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
> - *(_ftrace_branch) \
> - VMLINUX_SYMBOL(__stop_branch_profile) = .;
> +#define BRANCH_PROFILE() \
> + VMLINUX_SYMBOL(__start_branch_profile) = . ASM_NL \
> + *(_ftrace_branch) \
> + VMLINUX_SYMBOL(__stop_branch_profile) = . ASM_NL
> #else
> #define BRANCH_PROFILE()
> #endif
>
> #ifdef CONFIG_EVENT_TRACING
> -#define FTRACE_EVENTS() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start_ftrace_events) = .; \
> - *(_ftrace_events) \
> - VMLINUX_SYMBOL(__stop_ftrace_events) = .;
> +#define FTRACE_EVENTS() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start_ftrace_events) = . ASM_NL \
> + *(_ftrace_events) \
> + VMLINUX_SYMBOL(__stop_ftrace_events) = . ASM_NL
> #else
> #define FTRACE_EVENTS()
> #endif
>
> #ifdef CONFIG_TRACING
> -#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
> +#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = . ASM_NL \
> *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
> - VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
> -#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
> - *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
> - VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
> + VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = . ASM_NL
> +#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = . ASM_NL \
> + *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
> + VMLINUX_SYMBOL(__stop___tracepoint_str) = . ASM_NL
> #else
> #define TRACE_PRINTKS()
> #define TRACEPOINT_STR()
> #endif
>
> #ifdef CONFIG_FTRACE_SYSCALLS
> -#define TRACE_SYSCALLS() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
> - *(__syscalls_metadata) \
> - VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
> +#define TRACE_SYSCALLS() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start_syscalls_metadata) = . ASM_NL \
> + *(__syscalls_metadata) \
> + VMLINUX_SYMBOL(__stop_syscalls_metadata) = . ASM_NL
> #else
> #define TRACE_SYSCALLS()
> #endif
>
> #ifdef CONFIG_CLKSRC_OF
> -#define CLKSRC_OF_TABLES() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__clksrc_of_table) = .; \
> +#define CLKSRC_OF_TABLES() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__clksrc_of_table) = . ASM_NL \
> *(__clksrc_of_table) \
> *(__clksrc_of_table_end)
> #else
> @@ -150,8 +152,8 @@
>
> #ifdef CONFIG_IRQCHIP
> #define IRQCHIP_OF_MATCH_TABLE() \
> - . = ALIGN(8); \
> - VMLINUX_SYMBOL(__irqchip_begin) = .; \
> + . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__irqchip_begin) = . ASM_NL \
> *(__irqchip_of_table) \
> *(__irqchip_of_end)
> #else
> @@ -159,19 +161,19 @@
> #endif
>
> #ifdef CONFIG_COMMON_CLK
> -#define CLK_OF_TABLES() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__clk_of_table) = .; \
> - *(__clk_of_table) \
> +#define CLK_OF_TABLES() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__clk_of_table) = . ASM_NL \
> + *(__clk_of_table) \
> *(__clk_of_table_end)
> #else
> #define CLK_OF_TABLES()
> #endif
>
> #define KERNEL_DTB() \
> - STRUCT_ALIGN(); \
> - VMLINUX_SYMBOL(__dtb_start) = .; \
> + STRUCT_ALIGN() ASM_NL \
> + VMLINUX_SYMBOL(__dtb_start) = . ASM_NL \
> *(.dtb.init.rodata) \
> - VMLINUX_SYMBOL(__dtb_end) = .;
> + VMLINUX_SYMBOL(__dtb_end) = . ASM_NL
>
> /* .data section */
> #define DATA_DATA \
> @@ -181,17 +183,17 @@
> MEM_KEEP(init.data) \
> MEM_KEEP(exit.data) \
> *(.data.unlikely) \
> - STRUCT_ALIGN(); \
> + STRUCT_ALIGN() ASM_NL \
> *(__tracepoints) \
> /* implement dynamic printk debug */ \
> - . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start___jump_table) = .; \
> + . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start___jump_table) = . ASM_NL \
> *(__jump_table) \
> - VMLINUX_SYMBOL(__stop___jump_table) = .; \
> - . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start___verbose) = .; \
> + VMLINUX_SYMBOL(__stop___jump_table) = . ASM_NL \
> + . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start___verbose) = . ASM_NL \
> *(__verbose) \
> - VMLINUX_SYMBOL(__stop___verbose) = .; \
> + VMLINUX_SYMBOL(__stop___verbose) = . ASM_NL \
> LIKELY_PROFILE() \
> BRANCH_PROFILE() \
> TRACE_PRINTKS() \
> @@ -201,42 +203,42 @@
> * Data section helpers
> */
> #define NOSAVE_DATA \
> - . = ALIGN(PAGE_SIZE); \
> - VMLINUX_SYMBOL(__nosave_begin) = .; \
> + . = ALIGN(PAGE_SIZE) ASM_NL \
> + VMLINUX_SYMBOL(__nosave_begin) = . ASM_NL \
> *(.data..nosave) \
> - . = ALIGN(PAGE_SIZE); \
> - VMLINUX_SYMBOL(__nosave_end) = .;
> + . = ALIGN(PAGE_SIZE) ASM_NL \
> + VMLINUX_SYMBOL(__nosave_end) = . ASM_NL
>
> #define PAGE_ALIGNED_DATA(page_align) \
> - . = ALIGN(page_align); \
> + . = ALIGN(page_align) ASM_NL \
> *(.data..page_aligned)
>
> #define READ_MOSTLY_DATA(align) \
> - . = ALIGN(align); \
> + . = ALIGN(align) ASM_NL \
> *(.data..read_mostly) \
> - . = ALIGN(align);
> + . = ALIGN(align) ASM_NL
>
> #define CACHELINE_ALIGNED_DATA(align) \
> - . = ALIGN(align); \
> + . = ALIGN(align) ASM_NL \
> *(.data..cacheline_aligned)
>
> #define INIT_TASK_DATA(align) \
> - . = ALIGN(align); \
> + . = ALIGN(align) ASM_NL \
> *(.data..init_task)
>
> /*
> * Read only Data
> */
> #define RO_DATA_SECTION(align) \
> - . = ALIGN((align)); \
> + . = ALIGN((align)) ASM_NL \
> .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start_rodata) = .; \
> + VMLINUX_SYMBOL(__start_rodata) = . ASM_NL \
> *(.rodata) *(.rodata.*) \
> *(__vermagic) /* Kernel version magic */ \
> - . = ALIGN(8); \
> - VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
> + . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = . ASM_NL \
> *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
> - VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
> + VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = . ASM_NL \
> *(__tracepoints_strings)/* Tracepoints: strings */ \
> } \
> \
> @@ -248,106 +250,106 @@
> \
> /* PCI quirks */ \
> .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
> + VMLINUX_SYMBOL(__start_pci_fixups_early) = . ASM_NL \
> *(.pci_fixup_early) \
> - VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_early) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_header) = . ASM_NL \
> *(.pci_fixup_header) \
> - VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_header) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_final) = . ASM_NL \
> *(.pci_fixup_final) \
> - VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_final) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_enable) = . ASM_NL \
> *(.pci_fixup_enable) \
> - VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_enable) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_resume) = . ASM_NL \
> *(.pci_fixup_resume) \
> - VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_resume) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = . ASM_NL \
> *(.pci_fixup_resume_early) \
> - VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
> - VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = . ASM_NL \
> + VMLINUX_SYMBOL(__start_pci_fixups_suspend) = . ASM_NL \
> *(.pci_fixup_suspend) \
> - VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
> + VMLINUX_SYMBOL(__end_pci_fixups_suspend) = . ASM_NL \
> } \
> \
> /* Built-in firmware blobs */ \
> .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start_builtin_fw) = .; \
> + VMLINUX_SYMBOL(__start_builtin_fw) = . ASM_NL \
> *(.builtin_fw) \
> - VMLINUX_SYMBOL(__end_builtin_fw) = .; \
> + VMLINUX_SYMBOL(__end_builtin_fw) = . ASM_NL \
> } \
> \
> TRACEDATA \
> \
> /* Kernel symbol table: Normal symbols */ \
> __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ksymtab) = .; \
> + VMLINUX_SYMBOL(__start___ksymtab) = . ASM_NL \
> *(SORT(___ksymtab+*)) \
> - VMLINUX_SYMBOL(__stop___ksymtab) = .; \
> + VMLINUX_SYMBOL(__stop___ksymtab) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-only symbols */ \
> __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
> + VMLINUX_SYMBOL(__start___ksymtab_gpl) = . ASM_NL \
> *(SORT(___ksymtab_gpl+*)) \
> - VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
> + VMLINUX_SYMBOL(__stop___ksymtab_gpl) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: Normal unused symbols */ \
> __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
> + VMLINUX_SYMBOL(__start___ksymtab_unused) = . ASM_NL \
> *(SORT(___ksymtab_unused+*)) \
> - VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
> + VMLINUX_SYMBOL(__stop___ksymtab_unused) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-only unused symbols */ \
> __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
> + VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = . ASM_NL \
> *(SORT(___ksymtab_unused_gpl+*)) \
> - VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
> + VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-future-only symbols */ \
> __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
> + VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = . ASM_NL \
> *(SORT(___ksymtab_gpl_future+*)) \
> - VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
> + VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: Normal symbols */ \
> __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___kcrctab) = .; \
> + VMLINUX_SYMBOL(__start___kcrctab) = . ASM_NL \
> *(SORT(___kcrctab+*)) \
> - VMLINUX_SYMBOL(__stop___kcrctab) = .; \
> + VMLINUX_SYMBOL(__stop___kcrctab) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-only symbols */ \
> __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
> + VMLINUX_SYMBOL(__start___kcrctab_gpl) = . ASM_NL \
> *(SORT(___kcrctab_gpl+*)) \
> - VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
> + VMLINUX_SYMBOL(__stop___kcrctab_gpl) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: Normal unused symbols */ \
> __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
> + VMLINUX_SYMBOL(__start___kcrctab_unused) = . ASM_NL \
> *(SORT(___kcrctab_unused+*)) \
> - VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
> + VMLINUX_SYMBOL(__stop___kcrctab_unused) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-only unused symbols */ \
> __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
> + VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = . ASM_NL \
> *(SORT(___kcrctab_unused_gpl+*)) \
> - VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
> + VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: GPL-future-only symbols */ \
> __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
> + VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = . ASM_NL \
> *(SORT(___kcrctab_gpl_future+*)) \
> - VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
> + VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = . ASM_NL \
> } \
> \
> /* Kernel symbol table: strings */ \
> @@ -364,20 +366,20 @@
> \
> /* Built-in module parameters. */ \
> __param : AT(ADDR(__param) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___param) = .; \
> + VMLINUX_SYMBOL(__start___param) = . ASM_NL \
> *(__param) \
> - VMLINUX_SYMBOL(__stop___param) = .; \
> + VMLINUX_SYMBOL(__stop___param) = . ASM_NL \
> } \
> \
> /* Built-in module versions. */ \
> __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___modver) = .; \
> + VMLINUX_SYMBOL(__start___modver) = . ASM_NL \
> *(__modver) \
> - VMLINUX_SYMBOL(__stop___modver) = .; \
> - . = ALIGN((align)); \
> - VMLINUX_SYMBOL(__end_rodata) = .; \
> + VMLINUX_SYMBOL(__stop___modver) = . ASM_NL \
> + . = ALIGN((align)) ASM_NL \
> + VMLINUX_SYMBOL(__end_rodata) = . ASM_NL \
> } \
> - . = ALIGN((align));
> + . = ALIGN((align)) ASM_NL
>
> /* RODATA & RO_DATA provided for backward compatibility.
> * All archs are supposed to use RO_DATA() */
> @@ -386,15 +388,15 @@
>
> #define SECURITY_INIT \
> .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__security_initcall_start) = .; \
> + VMLINUX_SYMBOL(__security_initcall_start) = . ASM_NL \
> *(.security_initcall.init) \
> - VMLINUX_SYMBOL(__security_initcall_end) = .; \
> + VMLINUX_SYMBOL(__security_initcall_end) = . ASM_NL \
> }
>
> /* .text section. Map to function alignment to avoid address changes
> * during second ld run in second ld pass when generating System.map */
> #define TEXT_TEXT \
> - ALIGN_FUNCTION(); \
> + ALIGN_FUNCTION() ASM_NL \
> *(.text.hot) \
> *(.text) \
> *(.ref.text) \
> @@ -406,37 +408,37 @@
> /* sched.text is aling to function alignment to secure we have same
> * address even at second ld pass when generating System.map */
> #define SCHED_TEXT \
> - ALIGN_FUNCTION(); \
> - VMLINUX_SYMBOL(__sched_text_start) = .; \
> + ALIGN_FUNCTION() ASM_NL \
> + VMLINUX_SYMBOL(__sched_text_start) = . ASM_NL \
> *(.sched.text) \
> - VMLINUX_SYMBOL(__sched_text_end) = .;
> + VMLINUX_SYMBOL(__sched_text_end) = . ASM_NL
>
> /* spinlock.text is aling to function alignment to secure we have same
> * address even at second ld pass when generating System.map */
> #define LOCK_TEXT \
> - ALIGN_FUNCTION(); \
> - VMLINUX_SYMBOL(__lock_text_start) = .; \
> + ALIGN_FUNCTION() ASM_NL \
> + VMLINUX_SYMBOL(__lock_text_start) = . ASM_NL \
> *(.spinlock.text) \
> - VMLINUX_SYMBOL(__lock_text_end) = .;
> + VMLINUX_SYMBOL(__lock_text_end) = . ASM_NL
>
> #define KPROBES_TEXT \
> - ALIGN_FUNCTION(); \
> - VMLINUX_SYMBOL(__kprobes_text_start) = .; \
> + ALIGN_FUNCTION() ASM_NL \
> + VMLINUX_SYMBOL(__kprobes_text_start) = . ASM_NL \
> *(.kprobes.text) \
> - VMLINUX_SYMBOL(__kprobes_text_end) = .;
> + VMLINUX_SYMBOL(__kprobes_text_end) = . ASM_NL
>
> #define ENTRY_TEXT \
> - ALIGN_FUNCTION(); \
> - VMLINUX_SYMBOL(__entry_text_start) = .; \
> + ALIGN_FUNCTION() ASM_NL \
> + VMLINUX_SYMBOL(__entry_text_start) = . ASM_NL \
> *(.entry.text) \
> - VMLINUX_SYMBOL(__entry_text_end) = .;
> + VMLINUX_SYMBOL(__entry_text_end) = . ASM_NL
>
> #ifdef CONFIG_FUNCTION_GRAPH_TRACER
> #define IRQENTRY_TEXT \
> - ALIGN_FUNCTION(); \
> - VMLINUX_SYMBOL(__irqentry_text_start) = .; \
> + ALIGN_FUNCTION() ASM_NL \
> + VMLINUX_SYMBOL(__irqentry_text_start) = . ASM_NL \
> *(.irqentry.text) \
> - VMLINUX_SYMBOL(__irqentry_text_end) = .;
> + VMLINUX_SYMBOL(__irqentry_text_end) = . ASM_NL
> #else
> #define IRQENTRY_TEXT
> #endif
> @@ -444,7 +446,7 @@
> /* Section used for early init (in .S files) */
> #define HEAD_TEXT *(.head.text)
>
> -#define HEAD_TEXT_SECTION \
> +#define HEAD_TEXT_SECTION \
> .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
> HEAD_TEXT \
> }
> @@ -453,28 +455,28 @@
> * Exception table
> */
> #define EXCEPTION_TABLE(align) \
> - . = ALIGN(align); \
> + . = ALIGN(align) ASM_NL \
> __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___ex_table) = .; \
> + VMLINUX_SYMBOL(__start___ex_table) = . ASM_NL \
> *(__ex_table) \
> - VMLINUX_SYMBOL(__stop___ex_table) = .; \
> + VMLINUX_SYMBOL(__stop___ex_table) = . ASM_NL \
> }
>
> /*
> * Init task
> */
> #define INIT_TASK_DATA_SECTION(align) \
> - . = ALIGN(align); \
> + . = ALIGN(align) ASM_NL \
> .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
> INIT_TASK_DATA(align) \
> }
>
> #ifdef CONFIG_CONSTRUCTORS
> -#define KERNEL_CTORS() . = ALIGN(8); \
> - VMLINUX_SYMBOL(__ctors_start) = .; \
> - *(.ctors) \
> - *(.init_array) \
> - VMLINUX_SYMBOL(__ctors_end) = .;
> +#define KERNEL_CTORS() . = ALIGN(8) ASM_NL \
> + VMLINUX_SYMBOL(__ctors_start) = . ASM_NL \
> + *(.ctors) \
> + *(.init_array) \
> + VMLINUX_SYMBOL(__ctors_end) = . ASM_NL
> #else
> #define KERNEL_CTORS()
> #endif
> @@ -515,7 +517,7 @@
> * zeroed during startup
> */
> #define SBSS(sbss_align) \
> - . = ALIGN(sbss_align); \
> + . = ALIGN(sbss_align) ASM_NL \
> .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
> *(.sbss) \
> *(.scommon) \
> @@ -530,7 +532,7 @@
> #endif
>
> #define BSS(bss_align) \
> - . = ALIGN(bss_align); \
> + . = ALIGN(bss_align) ASM_NL \
> .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
> BSS_FIRST_SECTIONS \
> *(.bss..page_aligned) \
> @@ -581,11 +583,11 @@
>
> #ifdef CONFIG_GENERIC_BUG
> #define BUG_TABLE \
> - . = ALIGN(8); \
> + . = ALIGN(8) ASM_NL \
> __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start___bug_table) = .; \
> + VMLINUX_SYMBOL(__start___bug_table) = . ASM_NL \
> *(__bug_table) \
> - VMLINUX_SYMBOL(__stop___bug_table) = .; \
> + VMLINUX_SYMBOL(__stop___bug_table) = . ASM_NL \
> }
> #else
> #define BUG_TABLE
> @@ -593,11 +595,11 @@
>
> #ifdef CONFIG_PM_TRACE
> #define TRACEDATA \
> - . = ALIGN(4); \
> + . = ALIGN(4) ASM_NL \
> .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__tracedata_start) = .; \
> + VMLINUX_SYMBOL(__tracedata_start) = . ASM_NL \
> *(.tracedata) \
> - VMLINUX_SYMBOL(__tracedata_end) = .; \
> + VMLINUX_SYMBOL(__tracedata_end) = . ASM_NL \
> }
> #else
> #define TRACEDATA
> @@ -605,24 +607,24 @@
>
> #define NOTES \
> .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__start_notes) = .; \
> + VMLINUX_SYMBOL(__start_notes) = . ASM_NL \
> *(.note.*) \
> - VMLINUX_SYMBOL(__stop_notes) = .; \
> + VMLINUX_SYMBOL(__stop_notes) = . ASM_NL \
> }
>
> #define INIT_SETUP(initsetup_align) \
> - . = ALIGN(initsetup_align); \
> - VMLINUX_SYMBOL(__setup_start) = .; \
> + . = ALIGN(initsetup_align) ASM_NL \
> + VMLINUX_SYMBOL(__setup_start) = . ASM_NL \
> *(.init.setup) \
> - VMLINUX_SYMBOL(__setup_end) = .;
> + VMLINUX_SYMBOL(__setup_end) = . ASM_NL
>
> #define INIT_CALLS_LEVEL(level) \
> - VMLINUX_SYMBOL(__initcall##level##_start) = .; \
> + VMLINUX_SYMBOL(__initcall##level##_start) = . ASM_NL \
> *(.initcall##level##.init) \
> *(.initcall##level##s.init) \
>
> #define INIT_CALLS \
> - VMLINUX_SYMBOL(__initcall_start) = .; \
> + VMLINUX_SYMBOL(__initcall_start) = . ASM_NL \
> *(.initcallearly.init) \
> INIT_CALLS_LEVEL(0) \
> INIT_CALLS_LEVEL(1) \
> @@ -633,24 +635,24 @@
> INIT_CALLS_LEVEL(rootfs) \
> INIT_CALLS_LEVEL(6) \
> INIT_CALLS_LEVEL(7) \
> - VMLINUX_SYMBOL(__initcall_end) = .;
> + VMLINUX_SYMBOL(__initcall_end) = . ASM_NL
>
> #define CON_INITCALL \
> - VMLINUX_SYMBOL(__con_initcall_start) = .; \
> + VMLINUX_SYMBOL(__con_initcall_start) = . ASM_NL \
> *(.con_initcall.init) \
> - VMLINUX_SYMBOL(__con_initcall_end) = .;
> + VMLINUX_SYMBOL(__con_initcall_end) = . ASM_NL
>
> #define SECURITY_INITCALL \
> - VMLINUX_SYMBOL(__security_initcall_start) = .; \
> + VMLINUX_SYMBOL(__security_initcall_start) = . ASM_NL \
> *(.security_initcall.init) \
> - VMLINUX_SYMBOL(__security_initcall_end) = .;
> + VMLINUX_SYMBOL(__security_initcall_end) = . ASM_NL
>
> #ifdef CONFIG_BLK_DEV_INITRD
> #define INIT_RAM_FS \
> - . = ALIGN(4); \
> - VMLINUX_SYMBOL(__initramfs_start) = .; \
> + . = ALIGN(4) ASM_NL \
> + VMLINUX_SYMBOL(__initramfs_start) = . ASM_NL \
> *(.init.ramfs) \
> - . = ALIGN(8); \
> + . = ALIGN(8) ASM_NL \
> *(.init.ramfs.info)
> #else
> #define INIT_RAM_FS
> @@ -685,16 +687,16 @@
> * sharing between subsections for different purposes.
> */
> #define PERCPU_INPUT(cacheline) \
> - VMLINUX_SYMBOL(__per_cpu_start) = .; \
> + VMLINUX_SYMBOL(__per_cpu_start) = . ASM_NL \
> *(.data..percpu..first) \
> - . = ALIGN(PAGE_SIZE); \
> + . = ALIGN(PAGE_SIZE) ASM_NL \
> *(.data..percpu..page_aligned) \
> - . = ALIGN(cacheline); \
> + . = ALIGN(cacheline) ASM_NL \
> *(.data..percpu..readmostly) \
> - . = ALIGN(cacheline); \
> + . = ALIGN(cacheline) ASM_NL \
> *(.data..percpu) \
> *(.data..percpu..shared_aligned) \
> - VMLINUX_SYMBOL(__per_cpu_end) = .;
> + VMLINUX_SYMBOL(__per_cpu_end) = . ASM_NL
>
> /**
> * PERCPU_VADDR - define output section for percpu area
> @@ -721,12 +723,12 @@
> * address, use PERCPU_SECTION.
> */
> #define PERCPU_VADDR(cacheline, vaddr, phdr) \
> - VMLINUX_SYMBOL(__per_cpu_load) = .; \
> + VMLINUX_SYMBOL(__per_cpu_load) = . ASM_NL \
> .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
> - LOAD_OFFSET) { \
> PERCPU_INPUT(cacheline) \
> } phdr \
> - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
> + . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu) ASM_NL
>
> /**
> * PERCPU_SECTION - define output section for percpu area, simple version
> @@ -741,9 +743,9 @@
> * .data..percpu which is required for relocatable x86_32 configuration.
> */
> #define PERCPU_SECTION(cacheline) \
> - . = ALIGN(PAGE_SIZE); \
> + . = ALIGN(PAGE_SIZE) ASM_NL \
> .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(__per_cpu_load) = .; \
> + VMLINUX_SYMBOL(__per_cpu_load) = . ASM_NL \
> PERCPU_INPUT(cacheline) \
> }
>
> @@ -767,7 +769,7 @@
> *
> * use 0 as page_align if page_aligned data is not used */
> #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
> - . = ALIGN(PAGE_SIZE); \
> + . = ALIGN(PAGE_SIZE) ASM_NL \
> .data : AT(ADDR(.data) - LOAD_OFFSET) { \
> INIT_TASK_DATA(inittask) \
> NOSAVE_DATA \
> @@ -779,11 +781,11 @@
> }
>
> #define INIT_TEXT_SECTION(inittext_align) \
> - . = ALIGN(inittext_align); \
> + . = ALIGN(inittext_align) ASM_NL \
> .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
> - VMLINUX_SYMBOL(_sinittext) = .; \
> + VMLINUX_SYMBOL(_sinittext) = . ASM_NL \
> INIT_TEXT \
> - VMLINUX_SYMBOL(_einittext) = .; \
> + VMLINUX_SYMBOL(_einittext) = . ASM_NL \
> }
>
> #define INIT_DATA_SECTION(initsetup_align) \
> @@ -797,9 +799,9 @@
> }
>
> #define BSS_SECTION(sbss_align, bss_align, stop_align) \
> - . = ALIGN(sbss_align); \
> - VMLINUX_SYMBOL(__bss_start) = .; \
> + . = ALIGN(sbss_align) ASM_NL \
> + VMLINUX_SYMBOL(__bss_start) = . ASM_NL \
> SBSS(sbss_align) \
> BSS(bss_align) \
> - . = ALIGN(stop_align); \
> - VMLINUX_SYMBOL(__bss_stop) = .;
> + . = ALIGN(stop_align) ASM_NL \
> + VMLINUX_SYMBOL(__bss_stop) = . ASM_NL
> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
> index d3e8ad2..a6a42dd 100644
> --- a/include/linux/linkage.h
> +++ b/include/linux/linkage.h
> @@ -6,6 +6,11 @@
> #include <linux/export.h>
> #include <asm/linkage.h>
>
> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
> +#ifndef ASM_NL
> +#define ASM_NL ;
> +#endif
> +
> #ifdef __cplusplus
> #define CPP_ASMLINKAGE extern "C"
> #else
> @@ -75,21 +80,21 @@
>
> #ifndef ENTRY
> #define ENTRY(name) \
> - .globl name; \
> - ALIGN; \
> - name:
> + .globl name ASM_NL \
> + ALIGN ASM_NL \
> + name:
> #endif
> #endif /* LINKER_SCRIPT */
>
> #ifndef WEAK
> #define WEAK(name) \
> - .weak name; \
> + .weak name ASM_NL \
> name:
> #endif
>
> #ifndef END
> #define END(name) \
> - .size name, .-name
> + .size name, .-name
> #endif
>
> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
> @@ -98,8 +103,8 @@
> */
> #ifndef ENDPROC
> #define ENDPROC(name) \
> - .type name, @function; \
> - END(name)
> + .type name, @function ASM_NL \
> + END(name)
> #endif
>
> #endif
> --
> 1.7.7.6
> --
> To unsubscribe from this list: send the line "unsubscribe linux-arch" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
Thanks.
-- Max
On 10/28/2013 04:00 PM, Chen Gang wrote:
> For some toolchains, they use another character as newline in a macro
> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
> macro) instead of ';' for it.
>
> Since "linux/export.h" are mainly used for exporting work, then our fix
> more likely belongs to "linux/linkage.h", and we need add the related
> checking in "linkage.h".
>
> Also need notice 80 columns wrap, and '\t' for each line.
>
>
> Signed-off-by: Chen Gang <[email protected]>
> ---
> arch/arc/include/asm/linkage.h | 2 +
> include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
> include/linux/linkage.h | 19 +
Like Max said, please drop the linker script bits - they are OK - otherwise ARC
kernel won't have been building at all.
And you also need to fix kernel/modsign_certificate.S as part of same patchset as
that is where the problem started in first place.
-Vineet
On 10/28/2013 09:45 PM, Vineet Gupta wrote:
> On 10/28/2013 04:00 PM, Chen Gang wrote:
>> For some toolchains, they use another character as newline in a macro
>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>> macro) instead of ';' for it.
>>
>> Since "linux/export.h" are mainly used for exporting work, then our fix
>> more likely belongs to "linux/linkage.h", and we need add the related
>> checking in "linkage.h".
>>
>> Also need notice 80 columns wrap, and '\t' for each line.
>>
>>
>> Signed-off-by: Chen Gang <[email protected]>
>> ---
>> arch/arc/include/asm/linkage.h | 2 +
>> include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
>> include/linux/linkage.h | 19 +
>
> Like Max said, please drop the linker script bits - they are OK - otherwise ARC
> kernel won't have been building at all.
>
OK, thank you and Max, I will send patch v2 for it.
Hmm... after this modification, it passed allmodconfig for x86_64. And
for arc, at least, it can build up to the "mm/" sub-directory (after
that point, I stopped compiling).
> And you also need to fix kernel/modsign_certificate.S as part of same patchset as
> that is where the problem started in first place.
>
Hmm... can that be another patch?
For "kernel/modsign_certificate.S", I recommend expanding the macro,
which will shrink the code lines and avoid including an additional
header file; I feel that will be simpler for both code readers and
writers.
Thanks.
--
Chen Gang
On 10/29/2013 09:04 AM, Chen Gang wrote:
> On 10/28/2013 09:45 PM, Vineet Gupta wrote:
>> On 10/28/2013 04:00 PM, Chen Gang wrote:
>>> For some toolchains, they use another character as newline in a macro
>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>> macro) instead of ';' for it.
>>>
>>> Since "linux/export.h" are mainly used for exporting work, then our fix
>>> more likely belongs to "linux/linkage.h", and we need add the related
>>> checking in "linkage.h".
>>>
>>> Also need notice 80 columns wrap, and '\t' for each line.
>>>
>>>
>>> Signed-off-by: Chen Gang <[email protected]>
>>> ---
>>> arch/arc/include/asm/linkage.h | 2 +
>>> include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
>>> include/linux/linkage.h | 19 +
>>
>> Like Max said, please drop the linker script bits - they are OK - otherwise ARC
>> kernel won't have been building at all.
>>
>
> OK, thank you and Max, I will send patch v2 for it.
>
> Hmm... after this modification, it passed allmodconfig for x86_64. And
> for arc, at least, it can build to "mm/" sub-directory (after here, I
> stopped compiling).
It seems I should not have stopped when the build reached the "mm/"
sub-directory -- that would have let me find: "ld and as are really
'individual' enough, although both of them belong to binutils". :-)
>
>
>> And you also need to fix kernel/modsign_certificate.S as part of same patchset as
>> that is where the problem started in first place.
>>
>
> Hmm... can that be another patch?
>
> For "kernel/modsign_certificate.S", I recommend to expand the macro
> which will shrink code line, either need not include additional header
> file, I feel that will be simpler for both code reader and writers.
>
>
> Thanks.
>
--
Chen Gang
For some assemblers, they use another character as newline in a macro
(e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
macro) instead of ';' for it.
Also need notice about code styles ('\t' for each line).
Signed-off-by: Chen Gang <[email protected]>
---
arch/arc/include/asm/linkage.h | 2 ++
include/linux/linkage.h | 19 ++++++++++++-------
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e..66ee552 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -11,6 +11,8 @@
#ifdef __ASSEMBLY__
+#define ASM_NL ` /* use '`' to mark new line in macro */
+
/* Can't use the ENTRY macro in linux/linkage.h
* gas considers ';' as comment vs. newline
*/
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d3e8ad2..a6a42dd 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -6,6 +6,11 @@
#include <linux/export.h>
#include <asm/linkage.h>
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL ;
+#endif
+
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
@@ -75,21 +80,21 @@
#ifndef ENTRY
#define ENTRY(name) \
- .globl name; \
- ALIGN; \
- name:
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name:
#endif
#endif /* LINKER_SCRIPT */
#ifndef WEAK
#define WEAK(name) \
- .weak name; \
+ .weak name ASM_NL \
name:
#endif
#ifndef END
#define END(name) \
- .size name, .-name
+ .size name, .-name
#endif
/* If symbol 'name' is treated as a subroutine (gets called, and returns)
@@ -98,8 +103,8 @@
*/
#ifndef ENDPROC
#define ENDPROC(name) \
- .type name, @function; \
- END(name)
+ .type name, @function ASM_NL \
+ END(name)
#endif
#endif
--
1.7.7.6
On 10/29/2013 07:21 AM, Chen Gang wrote:
> For some assemblers, they use another character as newline in a macro
> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
> macro) instead of ';' for it.
>
> Also need notice about code styles ('\t' for each line).
>
>
> Signed-off-by: Chen Gang <[email protected]>
> ---
> arch/arc/include/asm/linkage.h | 2 ++
> include/linux/linkage.h | 19 ++++++++++++-------
Acked-by: Vineet Gupta <[email protected]>
-Vineet
On 10/29/2013 01:55 PM, Vineet Gupta wrote:
> On 10/29/2013 07:21 AM, Chen Gang wrote:
>> For some assemblers, they use another character as newline in a macro
>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>> macro) instead of ';' for it.
>>
>> Also need notice about code styles ('\t' for each line).
>>
>>
>> Signed-off-by: Chen Gang <[email protected]>
>> ---
>> arch/arc/include/asm/linkage.h | 2 ++
>> include/linux/linkage.h | 19 ++++++++++++-------
>
> Acked-by: Vineet Gupta <[email protected]>
>
Thank you very much!
--
Chen Gang
On 10/28/2013 07:18 PM, Max Filippov wrote:
> Hi Chen,
>
> On Mon, Oct 28, 2013 at 2:30 PM, Chen Gang <[email protected]> wrote:
>> > For some toolchains, they use another character as newline in a macro
>> > (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>> > macro) instead of ';' for it.
>> > Since "linux/export.h" are mainly used for exporting work, then our fix
>> > more likely belongs to "linux/linkage.h", and we need add the related
>> > checking in "linkage.h".
>> >
>> > Also need notice 80 columns wrap, and '\t' for each line.
>> >
>> >
>> > Signed-off-by: Chen Gang <[email protected]>
>> > ---
>> > arch/arc/include/asm/linkage.h | 2 +
>> > include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
> Actually vmlinux.lds.h is not an assembly code, but rather a linker script,
> later preprocessed by cpp.
>
Question: does xtensa also have its own ASM_NL?
:-)
Thanks.
--
Chen Gang
On Tue, Oct 29, 2013 at 11:57 AM, Chen Gang <[email protected]> wrote:
> On 10/28/2013 07:18 PM, Max Filippov wrote:
>> Hi Chen,
>>
>> On Mon, Oct 28, 2013 at 2:30 PM, Chen Gang <[email protected]> wrote:
>>> > For some toolchains, they use another character as newline in a macro
>>> > (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>> > macro) instead of ';' for it.
>>> > Since "linux/export.h" are mainly used for exporting work, then our fix
>>> > more likely belongs to "linux/linkage.h", and we need add the related
>>> > checking in "linkage.h".
>>> >
>>> > Also need notice 80 columns wrap, and '\t' for each line.
>>> >
>>> >
>>> > Signed-off-by: Chen Gang <[email protected]>
>>> > ---
>>> > arch/arc/include/asm/linkage.h | 2 +
>>> > include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
>> Actually vmlinux.lds.h is not an assembly code, but rather a linker script,
>> later preprocessed by cpp.
>>
>
> Consult: do xtensa also has its own ASM_NL?
It works well with common semicolon.
--
Thanks.
-- Max
On 10/29/2013 04:34 PM, Max Filippov wrote:
> On Tue, Oct 29, 2013 at 11:57 AM, Chen Gang <[email protected]> wrote:
>> On 10/28/2013 07:18 PM, Max Filippov wrote:
>>> Hi Chen,
>>>
>>> On Mon, Oct 28, 2013 at 2:30 PM, Chen Gang <[email protected]> wrote:
>>>>> For some toolchains, they use another character as newline in a macro
>>>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>>>> macro) instead of ';' for it.
>>>>> Since "linux/export.h" are mainly used for exporting work, then our fix
>>>>> more likely belongs to "linux/linkage.h", and we need add the related
>>>>> checking in "linkage.h".
>>>>>
>>>>> Also need notice 80 columns wrap, and '\t' for each line.
>>>>>
>>>>>
>>>>> Signed-off-by: Chen Gang <[email protected]>
>>>>> ---
>>>>> arch/arc/include/asm/linkage.h | 2 +
>>>>> include/asm-generic/vmlinux.lds.h | 350 +++++++++++++++++++------------------
>>> Actually vmlinux.lds.h is not an assembly code, but rather a linker script,
>>> later preprocessed by cpp.
>>>
>>
>> Consult: do xtensa also has its own ASM_NL?
>
> It works well with common semicolon.
>
Thank you for your confirmation.
After scanning the "arch/" sub-directory, besides arc, the cris, c6x,
frv, and parisc ports also use ';' as a comment marker, so I guess they
will welcome this patch.
Some other architectures may use '#', '|', '!', '@' or "//" as the line
comment mark. It seems most of them support ';' as a newline in a
macro, but it is still necessary to check each one.
If this patch is applied, I will sync all the related architectures to
keep ASM_NL consistent.
:-)
Thanks.
--
Chen Gang
Chen Gang <[email protected]> wrote:
> > And you also need to fix kernel/modsign_certificate.S as part of same patchset as
> > that is where the problem started in first place.
> >
>
> Hmm... can that be another patch?
>
> For "kernel/modsign_certificate.S", I recommend to expand the macro
> which will shrink code line, either need not include additional header
> file, I feel that will be simpler for both code reader and writers.
I recommend leaving that file alone. That gets moved to a .S file in patches
queued in the security tree.
David
On 10/29/2013 06:39 PM, David Howells wrote:
> Chen Gang <[email protected]> wrote:
>
>>> > > And you also need to fix kernel/modsign_certificate.S as part of same patchset as
>>> > > that is where the problem started in first place.
>>> > >
>> >
>> > Hmm... can that be another patch?
>> >
>> > For "kernel/modsign_certificate.S", I recommend to expand the macro
>> > which will shrink code line, either need not include additional header
>> > file, I feel that will be simpler for both code reader and writers.
> I recommend leaving that file alone. That gets moved to a .S file in patches
> queued in the security tree.
Excuse me, I am not very familiar with our version merging; I guess you
mean "this file will be removed, the related contents will be in
another .S file, and so we need not fix it within this file".
If my guess is correct, I support your recommendation (and if my guess
is incorrect, please let me know, thanks). :-)
BTW: I am still using the next-20130927 tree for it, and another
next-tree branch, next-20131025, does not have this file.
Thanks.
--
Chen Gang
Chen Gang <[email protected]> wrote:
> >> > For "kernel/modsign_certificate.S", I recommend to expand the macro
> >> > which will shrink code line, either need not include additional header
> >> > file, I feel that will be simpler for both code reader and writers.
> > I recommend leaving that file alone. That gets moved to a .S file in patches
> > queued in the security tree.
>
> Excuse me, I am not quite familiar with our version merging, I guess
> your meaning is "this file will be removed, and the related contents
> will be in another .S file, so we need not fix it within this file".
>
> If what I guess is correct, I support your recommendation (and if what I
> guess is incorrect, please let me know, thanks). :-)
Sorry, I misread what you were saying. The code has already been split out of
the .c file of course (I'd forgotten that it had). In the security tree next
branch, the .S file gets renamed and slightly modified here:
http://git.kernel.org/cgit/linux/kernel/git/jmorris/linux-security.git/commit/?h=next&id=b56e5a17b6b9acd16997960504b9940d0d7984e7
David
On 10/29/2013 09:59 PM, David Howells wrote:
> Chen Gang <[email protected]> wrote:
>
>>>>> For "kernel/modsign_certificate.S", I recommend to expand the macro
>>>>> which will shrink code line, either need not include additional header
>>>>> file, I feel that will be simpler for both code reader and writers.
>>> I recommend leaving that file alone. That gets moved to a .S file in patches
>>> queued in the security tree.
>>
>> Excuse me, I am not quite familiar with our version merging, I guess
>> your meaning is "this file will be removed, and the related contents
>> will be in another .S file, so we need not fix it within this file".
>>
>> If what I guess is correct, I support your recommendation (and if what I
>> guess is incorrect, please let me know, thanks). :-)
>
> Sorry, I misread what you were saying. The code has already been split out of
> the .c file of course (I'd forgotten that it had). In the security tree next
> branch, the .S file gets renamed and slightly modified here:
>
> http://git.kernel.org/cgit/linux/kernel/git/jmorris/linux-security.git/commit/?h=next&id=b56e5a17b6b9acd16997960504b9940d0d7984e7
>
Oh, thank you for the information; I will send patch v3 for it.
I need to base it on another next-tree branch, using its tag
"next-20131025" to continue (which already contains the original
related changes).
Also, please excuse me, my English is not very good (which easily leads
other members to misunderstand).
Thanks.
--
Chen Gang
On 2013-11-21 10:58, Vineet Gupta wrote:
> On 11/01/2013 11:15 AM, Vineet Gupta wrote:
>> +CC Sam, Tim, Joe, Michal, David
>>
>> Guys can you please take a look at this patch and suggest the best way to merge.
>>
>> The root cause is some gas ports (including ARC) use ';' for comment (vs. newline)
>> More background at https://lkml.org/lkml/2013/10/24/365
>
> Ping ! Any objections to this. Given that this is exception case, it is simpler
> than defining a Kconfig control.
>
> -Vineet
>>
>>
>> On 10/29/2013 07:21 AM, Chen Gang wrote:
>>> For some assemblers, they use another character as newline in a macro
>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>> macro) instead of ';' for it.
>>>
>>> Also need notice about code styles ('\t' for each line).
>>>
>>>
>>> Signed-off-by: Chen Gang <[email protected]>
>>> ---
>>> arch/arc/include/asm/linkage.h | 2 ++
>>> include/linux/linkage.h | 19 ++++++++++++-------
>>> 2 files changed, 14 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
>>> index 0283e9e..66ee552 100644
>>> --- a/arch/arc/include/asm/linkage.h
>>> +++ b/arch/arc/include/asm/linkage.h
>>> @@ -11,6 +11,8 @@
>>>
>>> #ifdef __ASSEMBLY__
>>>
>>> +#define ASM_NL ` /* use '`' to mark new line in macro */
>>> +
>>> /* Can't use the ENTRY macro in linux/linkage.h
>>> * gas considers ';' as comment vs. newline
>>> */
>>> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
>>> index d3e8ad2..a6a42dd 100644
>>> --- a/include/linux/linkage.h
>>> +++ b/include/linux/linkage.h
>>> @@ -6,6 +6,11 @@
>>> #include <linux/export.h>
>>> #include <asm/linkage.h>
>>>
>>> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
>>> +#ifndef ASM_NL
>>> +#define ASM_NL ;
>>> +#endif
>>> +
>>> #ifdef __cplusplus
>>> #define CPP_ASMLINKAGE extern "C"
>>> #else
>>> @@ -75,21 +80,21 @@
>>>
>>> #ifndef ENTRY
>>> #define ENTRY(name) \
>>> - .globl name; \
>>> - ALIGN; \
>>> - name:
>>> + .globl name ASM_NL \
>>> + ALIGN ASM_NL \
>>> + name:
>>> #endif
>>> #endif /* LINKER_SCRIPT */
>>>
>>> #ifndef WEAK
>>> #define WEAK(name) \
>>> - .weak name; \
>>> + .weak name ASM_NL \
>>> name:
>>> #endif
>>>
>>> #ifndef END
>>> #define END(name) \
>>> - .size name, .-name
>>> + .size name, .-name
>>> #endif
>>>
>>> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
>>> @@ -98,8 +103,8 @@
>>> */
>>> #ifndef ENDPROC
>>> #define ENDPROC(name) \
>>> - .type name, @function; \
>>> - END(name)
>>> + .type name, @function ASM_NL \
>>> + END(name)
>>> #endif
>>>
>>> #endif
>>>
>>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>
Hi Michal,
On Friday 03 January 2014 06:08 PM, Michal Marek wrote:
> On 2013-11-21 10:58, Vineet Gupta wrote:
>> On 11/01/2013 11:15 AM, Vineet Gupta wrote:
>>> +CC Sam, Tim, Joe, Michal, David
>>>
>>> Guys can you please take a look at this patch and suggest the best way to merge.
>>>
>>> The root cause is some gas ports (including ARC) use ';' for comment (vs. newline)
>>> More background at https://lkml.org/lkml/2013/10/24/365
>> Ping ! Any objections to this. Given that this is exception case, it is simpler
>> than defining a Kconfig control.
>>
>> -Vineet
>>>
>>> On 10/29/2013 07:21 AM, Chen Gang wrote:
>>>> For some assemblers, they use another character as newline in a macro
>>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>>> macro) instead of ';' for it.
>>>>
>>>> Also need notice about code styles ('\t' for each line).
>>>>
>>>>
>>>> Signed-off-by: Chen Gang <[email protected]>
>>>> ---
>>>> arch/arc/include/asm/linkage.h | 2 ++
>>>> include/linux/linkage.h | 19 ++++++++++++-------
>>>> 2 files changed, 14 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
>>>> index 0283e9e..66ee552 100644
>>>> --- a/arch/arc/include/asm/linkage.h
>>>> +++ b/arch/arc/include/asm/linkage.h
>>>> @@ -11,6 +11,8 @@
>>>>
>>>> #ifdef __ASSEMBLY__
>>>>
>>>> +#define ASM_NL ` /* use '`' to mark new line in macro */
>>>> +
>>>> /* Can't use the ENTRY macro in linux/linkage.h
>>>> * gas considers ';' as comment vs. newline
>>>> */
>>>> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
>>>> index d3e8ad2..a6a42dd 100644
>>>> --- a/include/linux/linkage.h
>>>> +++ b/include/linux/linkage.h
>>>> @@ -6,6 +6,11 @@
>>>> #include <linux/export.h>
>>>> #include <asm/linkage.h>
>>>>
>>>> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
>>>> +#ifndef ASM_NL
>>>> +#define ASM_NL ;
>>>> +#endif
>>>> +
>>>> #ifdef __cplusplus
>>>> #define CPP_ASMLINKAGE extern "C"
>>>> #else
>>>> @@ -75,21 +80,21 @@
>>>>
>>>> #ifndef ENTRY
>>>> #define ENTRY(name) \
>>>> - .globl name; \
>>>> - ALIGN; \
>>>> - name:
>>>> + .globl name ASM_NL \
>>>> + ALIGN ASM_NL \
>>>> + name:
>>>> #endif
>>>> #endif /* LINKER_SCRIPT */
>>>>
>>>> #ifndef WEAK
>>>> #define WEAK(name) \
>>>> - .weak name; \
>>>> + .weak name ASM_NL \
>>>> name:
>>>> #endif
>>>>
>>>> #ifndef END
>>>> #define END(name) \
>>>> - .size name, .-name
>>>> + .size name, .-name
>>>> #endif
>>>>
>>>> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
>>>> @@ -98,8 +103,8 @@
>>>> */
>>>> #ifndef ENDPROC
>>>> #define ENDPROC(name) \
>>>> - .type name, @function; \
>>>> - END(name)
>>>> + .type name, @function ASM_NL \
>>>> + END(name)
>>>> #endif
>>>>
>>>> #endif
>>>>
>>>
Did you hit reply by mistake - as I can't seem to find any comments...
-Vineet
On 2014-01-06 06:36, Vineet Gupta wrote:
> Hi Michal,
>
> On Friday 03 January 2014 06:08 PM, Michal Marek wrote:
>> On 2013-11-21 10:58, Vineet Gupta wrote:
>>> On 11/01/2013 11:15 AM, Vineet Gupta wrote:
>>>> +CC Sam, Tim, Joe, Michal, David
>>>>
>>>> Guys can you please take a look at this patch and suggest the best way to merge.
>>>>
>>>> The root cause is some gas ports (including ARC) use ';' for comment (vs. newline)
>>>> More background at https://lkml.org/lkml/2013/10/24/365
>>> Ping ! Any objections to this. Given that this is exception case, it is simpler
>>> than defining a Kconfig control.
>>>
>>> -Vineet
>>>>
>>>> On 10/29/2013 07:21 AM, Chen Gang wrote:
>>>>> For some assemblers, they use another character as newline in a macro
>>>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>>>> macro) instead of ';' for it.
>>>>>
>>>>> Also need notice about code styles ('\t' for each line).
>>>>>
>>>>>
>>>>> Signed-off-by: Chen Gang <[email protected]>
>
> Did you hit reply by mistake - as I can't seem to find any comments...
Oops :). I wanted to ask if this is the final patch that should be
merged, or if there are more known instances of .S code that needs
s/;/ASM_NL/. I did not find any suspects outside arch/ by grepping. If
this is the final patch, Chen, could you please resubmit it? I do not
have the original copy.
Michal
Forwarding to Chen's email address:
On Monday 06 January 2014 02:56 PM, Michal Marek wrote:
> On 2014-01-06 06:36, Vineet Gupta wrote:
>> Hi Michal,
>>
>> On Friday 03 January 2014 06:08 PM, Michal Marek wrote:
>>> On 2013-11-21 10:58, Vineet Gupta wrote:
>>>> On 11/01/2013 11:15 AM, Vineet Gupta wrote:
>>>>> +CC Sam, Tim, Joe, Michal, David
>>>>>
>>>>> Guys can you please take a look at this patch and suggest the best way to merge.
>>>>>
>>>>> The root cause is some gas ports (including ARC) use ';' for comment (vs. newline)
>>>>> More background at https://lkml.org/lkml/2013/10/24/365
>>>> Ping ! Any objections to this. Given that this is exception case, it is simpler
>>>> than defining a Kconfig control.
>>>>
>>>> -Vineet
>>>>>
>>>>> On 10/29/2013 07:21 AM, Chen Gang wrote:
>>>>>> For some assemblers, they use another character as newline in a macro
>>>>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>>>>> macro) instead of ';' for it.
>>>>>>
>>>>>> Also need notice about code styles ('\t' for each line).
>>>>>>
>>>>>>
>>>>>> Signed-off-by: Chen Gang <[email protected]>
>
>>
>> Did you hit reply by mistake - as I can't seem to find any comments...
>
> Oops :). I wanted to ask if this is the final patch that should be
> merged, or if there are more known instances of .S code that needs
> s/;/ASM_NL/. I did not find any suspects outside arch/ by grepping. If
> this is the final patch, Chen, could you please resubmit it? I do not
> have the original copy.
>
> Michal
>
-Vineet
On 01/09/2014 02:41 PM, Vineet Gupta wrote:
> Forwarding to Chen's email address:
>
Oh, thank you for forwarding it; I am sorry that I really did not
notice it.
> On Monday 06 January 2014 02:56 PM, Michal Marek wrote:
...
>> Oops :). I wanted to ask if this is the final patch that should be
>> merged, or if there are more known instances of .S code that needs
>> s/;/ASM_NL/. I did not find any suspects outside arch/ by grepping. If
>> this is the final patch, Chen, could you please resubmit it? I do not
>> have the original copy.
>>
OK, thanks, I will send it again within this weekend (before the night
of 2014-01-12).
And I will use my gmail address instead of my asianux address in the
Signed-off-by (if you still need the original one, please let me know,
thanks).
BTW:
I am sorry for replying late; during these days, I changed jobs, moved
house for it, and got familiar with the related environments (the
android kernel and user mode programs).
But I still need to keep up 1-3 patches/month for the upstream kernel
(and this year, I will also need to make patches for qemu/kvm/xen, and
the toolchain).
Thanks.
--
Chen Gang
Open, share and attitude like air, water and life which God blessed
For some assemblers, they use another character as newline in a macro
(e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
macro) instead of ';' for it.
Signed-off-by: Chen Gang <[email protected]>
Acked-by: Vineet Gupta <[email protected]>
---
arch/arc/include/asm/linkage.h | 2 ++
include/linux/linkage.h | 19 ++++++++++++-------
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e..66ee552 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -11,6 +11,8 @@
#ifdef __ASSEMBLY__
+#define ASM_NL ` /* use '`' to mark new line in macro */
+
/* Can't use the ENTRY macro in linux/linkage.h
* gas considers ';' as comment vs. newline
*/
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d3e8ad2..a6a42dd 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -6,6 +6,11 @@
#include <linux/export.h>
#include <asm/linkage.h>
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL ;
+#endif
+
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
@@ -75,21 +80,21 @@
#ifndef ENTRY
#define ENTRY(name) \
- .globl name; \
- ALIGN; \
- name:
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name:
#endif
#endif /* LINKER_SCRIPT */
#ifndef WEAK
#define WEAK(name) \
- .weak name; \
+ .weak name ASM_NL \
name:
#endif
#ifndef END
#define END(name) \
- .size name, .-name
+ .size name, .-name
#endif
/* If symbol 'name' is treated as a subroutine (gets called, and returns)
@@ -98,8 +103,8 @@
*/
#ifndef ENDPROC
#define ENDPROC(name) \
- .type name, @function; \
- END(name)
+ .type name, @function ASM_NL \
+ END(name)
#endif
#endif
--
1.7.11.7
Hello Maintainers:
Please help check this patch when you have time.
Thanks.
On 01/12/2014 09:59 AM, Chen Gang wrote:
> For some assemblers, they use another character as newline in a macro
> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
> macro) instead of ';' for it.
>
>
> Signed-off-by: Chen Gang <[email protected]>
> Acked-by: Vineet Gupta <[email protected]>
> ---
> arch/arc/include/asm/linkage.h | 2 ++
> include/linux/linkage.h | 19 ++++++++++++-------
> 2 files changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
> index 0283e9e..66ee552 100644
> --- a/arch/arc/include/asm/linkage.h
> +++ b/arch/arc/include/asm/linkage.h
> @@ -11,6 +11,8 @@
>
> #ifdef __ASSEMBLY__
>
> +#define ASM_NL ` /* use '`' to mark new line in macro */
> +
> /* Can't use the ENTRY macro in linux/linkage.h
> * gas considers ';' as comment vs. newline
> */
> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
> index d3e8ad2..a6a42dd 100644
> --- a/include/linux/linkage.h
> +++ b/include/linux/linkage.h
> @@ -6,6 +6,11 @@
> #include <linux/export.h>
> #include <asm/linkage.h>
>
> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
> +#ifndef ASM_NL
> +#define ASM_NL ;
> +#endif
> +
> #ifdef __cplusplus
> #define CPP_ASMLINKAGE extern "C"
> #else
> @@ -75,21 +80,21 @@
>
> #ifndef ENTRY
> #define ENTRY(name) \
> - .globl name; \
> - ALIGN; \
> - name:
> + .globl name ASM_NL \
> + ALIGN ASM_NL \
> + name:
> #endif
> #endif /* LINKER_SCRIPT */
>
> #ifndef WEAK
> #define WEAK(name) \
> - .weak name; \
> + .weak name ASM_NL \
> name:
> #endif
>
> #ifndef END
> #define END(name) \
> - .size name, .-name
> + .size name, .-name
> #endif
>
> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
> @@ -98,8 +103,8 @@
> */
> #ifndef ENDPROC
> #define ENDPROC(name) \
> - .type name, @function; \
> - END(name)
> + .type name, @function ASM_NL \
> + END(name)
> #endif
>
> #endif
>
--
Chen Gang
Open, share and attitude like air, water and life which God blessed
Hi Mike,
On Saturday 18 January 2014 03:14 PM, Chen Gang wrote:
> Hello Maintainers:
>
> Please help check this patch when you have time.
>
> Thanks.
Do you know whose tree this is going to go through? I can take it
through ARC (but maybe for 3.15; however, it would be better if it went
through -mm or some such).
-Vineet
>
> On 01/12/2014 09:59 AM, Chen Gang wrote:
>> For some assemblers, they use another character as newline in a macro
>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>> macro) instead of ';' for it.
>>
>>
>> Signed-off-by: Chen Gang <[email protected]>
>> Acked-by: Vineet Gupta <[email protected]>
>> ---
>> arch/arc/include/asm/linkage.h | 2 ++
>> include/linux/linkage.h | 19 ++++++++++++-------
>> 2 files changed, 14 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
>> index 0283e9e..66ee552 100644
>> --- a/arch/arc/include/asm/linkage.h
>> +++ b/arch/arc/include/asm/linkage.h
>> @@ -11,6 +11,8 @@
>>
>> #ifdef __ASSEMBLY__
>>
>> +#define ASM_NL ` /* use '`' to mark new line in macro */
>> +
>> /* Can't use the ENTRY macro in linux/linkage.h
>> * gas considers ';' as comment vs. newline
>> */
>> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
>> index d3e8ad2..a6a42dd 100644
>> --- a/include/linux/linkage.h
>> +++ b/include/linux/linkage.h
>> @@ -6,6 +6,11 @@
>> #include <linux/export.h>
>> #include <asm/linkage.h>
>>
>> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
>> +#ifndef ASM_NL
>> +#define ASM_NL ;
>> +#endif
>> +
>> #ifdef __cplusplus
>> #define CPP_ASMLINKAGE extern "C"
>> #else
>> @@ -75,21 +80,21 @@
>>
>> #ifndef ENTRY
>> #define ENTRY(name) \
>> - .globl name; \
>> - ALIGN; \
>> - name:
>> + .globl name ASM_NL \
>> + ALIGN ASM_NL \
>> + name:
>> #endif
>> #endif /* LINKER_SCRIPT */
>>
>> #ifndef WEAK
>> #define WEAK(name) \
>> - .weak name; \
>> + .weak name ASM_NL \
>> name:
>> #endif
>>
>> #ifndef END
>> #define END(name) \
>> - .size name, .-name
>> + .size name, .-name
>> #endif
>>
>> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
>> @@ -98,8 +103,8 @@
>> */
>> #ifndef ENDPROC
>> #define ENDPROC(name) \
>> - .type name, @function; \
>> - END(name)
>> + .type name, @function ASM_NL \
>> + END(name)
>> #endif
>>
>> #endif
>>
>
On 01/21/2014 12:55 PM, Vineet Gupta wrote:
> Hi Mike,
>
> On Saturday 18 January 2014 03:14 PM, Chen Gang wrote:
>> Hello Maintainers:
>>
>> Please help check this patch when you have time.
>>
>> Thanks.
>
> Do you know whose tree this is goona go thru. I can take it thru ARC (but maybe
> for 3.15, however it would be better it went thru mm or some such).
>
Hello all:
Is this patch OK? If it needs additional improvement, please let me
know, thanks.
>
>>
>> On 01/12/2014 09:59 AM, Chen Gang wrote:
>>> For some assemblers, they use another character as newline in a macro
>>> (e.g. arc uses '`'), so for generic assembly code, need use ASM_NL (a
>>> macro) instead of ';' for it.
>>>
>>>
>>> Signed-off-by: Chen Gang <[email protected]>
>>> Acked-by: Vineet Gupta <[email protected]>
>>> ---
>>> arch/arc/include/asm/linkage.h | 2 ++
>>> include/linux/linkage.h | 19 ++++++++++++-------
>>> 2 files changed, 14 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
>>> index 0283e9e..66ee552 100644
>>> --- a/arch/arc/include/asm/linkage.h
>>> +++ b/arch/arc/include/asm/linkage.h
>>> @@ -11,6 +11,8 @@
>>>
>>> #ifdef __ASSEMBLY__
>>>
>>> +#define ASM_NL ` /* use '`' to mark new line in macro */
>>> +
>>> /* Can't use the ENTRY macro in linux/linkage.h
>>> * gas considers ';' as comment vs. newline
>>> */
>>> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
>>> index d3e8ad2..a6a42dd 100644
>>> --- a/include/linux/linkage.h
>>> +++ b/include/linux/linkage.h
>>> @@ -6,6 +6,11 @@
>>> #include <linux/export.h>
>>> #include <asm/linkage.h>
>>>
>>> +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
>>> +#ifndef ASM_NL
>>> +#define ASM_NL ;
>>> +#endif
>>> +
>>> #ifdef __cplusplus
>>> #define CPP_ASMLINKAGE extern "C"
>>> #else
>>> @@ -75,21 +80,21 @@
>>>
>>> #ifndef ENTRY
>>> #define ENTRY(name) \
>>> - .globl name; \
>>> - ALIGN; \
>>> - name:
>>> + .globl name ASM_NL \
>>> + ALIGN ASM_NL \
>>> + name:
>>> #endif
>>> #endif /* LINKER_SCRIPT */
>>>
>>> #ifndef WEAK
>>> #define WEAK(name) \
>>> - .weak name; \
>>> + .weak name ASM_NL \
>>> name:
>>> #endif
>>>
>>> #ifndef END
>>> #define END(name) \
>>> - .size name, .-name
>>> + .size name, .-name
>>> #endif
>>>
>>> /* If symbol 'name' is treated as a subroutine (gets called, and returns)
>>> @@ -98,8 +103,8 @@
>>> */
>>> #ifndef ENDPROC
>>> #define ENDPROC(name) \
>>> - .type name, @function; \
>>> - END(name)
>>> + .type name, @function ASM_NL \
>>> + END(name)
>>> #endif
>>>
>>> #endif
>>>
>>
>
Thanks.
--
Chen Gang
Open, share and attitude like air, water and life which God blessed
Dne 25.1.2014 12:07, Chen Gang napsal(a):
> On 01/21/2014 12:55 PM, Vineet Gupta wrote:
>> Hi Mike,
>>
>> On Saturday 18 January 2014 03:14 PM, Chen Gang wrote:
>>> Hello Maintainers:
>>>
>>> Please help check this patch when you have time.
>>>
>>> Thanks.
>>
>> Do you know whose tree this is goona go thru. I can take it thru ARC (but maybe
>> for 3.15, however it would be better it went thru mm or some such).
>>
>
> Hello all:
>
> Is this patch OK? if need additional improvement, please let me know,
> thanks.
I applied the patch to kbuild.git#kbuild, sorry for the delay.
Michal
On 01/28/2014 05:57 AM, Michal Marek wrote:
> Dne 25.1.2014 12:07, Chen Gang napsal(a):
>> On 01/21/2014 12:55 PM, Vineet Gupta wrote:
>>> Hi Mike,
>>>
>>> On Saturday 18 January 2014 03:14 PM, Chen Gang wrote:
>>>> Hello Maintainers:
>>>>
>>>> Please help check this patch when you have time.
>>>>
>>>> Thanks.
>>>
>>> Do you know whose tree this is goona go thru. I can take it thru ARC (but maybe
>>> for 3.15, however it would be better it went thru mm or some such).
>>>
>>
>> Hello all:
>>
>> Is this patch OK? if need additional improvement, please let me know,
>> thanks.
>
> I applied the patch to kbuild.git#kbuild, sorry for the delay.
>
That's all right (in fact, there is no need to be sorry); most of us
are really busy, so thank you very much for your work on it.
Next, I will continue making patches for the upstream kernel in my free
time (at least, I should provide 1-3 patches per month this year).
:-)
Thanks.
--
Chen Gang
Open, share and attitude like air, water and life which God blessed