2023-10-24 13:35:09

by Clément Léger

Subject: [PATCH v2 0/5] riscv: cleanup assembly usage of ENTRY()/END() and use local labels

This series cleans up all ENTRY()/END() macros used in arch/riscv/ as
well as the use of local labels. It removes the now-deprecated
ENTRY()/END()/WEAK() macros in favor of the new SYM_*() ones, which
provide a better understanding of what is meant to be annotated. Some
incorrect usages of SYM_FUNC_START() are also fixed in this series by
using the correct annotations. Finally, a few labels that were meant to
be local have been renamed with the .L prefix so that they are not
emitted as visible symbols.
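
As a minimal sketch of the conversion (my_func is a placeholder, not a
symbol touched by this series), an annotation such as:

ENTRY(my_func)
	ret
ENDPROC(my_func)

becomes:

SYM_FUNC_START(my_func)
	ret
SYM_FUNC_END(my_func)

where SYM_FUNC_START() marks the symbol as global and aligned, and
SYM_FUNC_END() records its type and size.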

Note: the patches have been split between arch/riscv/ and
arch/riscv/kvm/ since those directories have different maintainers.

---

Changes in V2:
- Remove duplicated SYM_FUNC_END(memmove)
- Use SYM_DATA for simple .quad usage (see the sketch below)
- Added Andrew's Reviewed-by tags
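
The SYM_DATA change above collapses the open-coded .global/.quad/.size
pattern into a single annotation; the purgatory patch ends up with:

SYM_DATA(riscv_kernel_entry, .quad 0)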

Clément Léger (5):
riscv: use ".L" local labels in assembly when applicable
riscv: Use SYM_*() assembly macros instead of deprecated ones
riscv: kernel: Use correct SYM_DATA_*() macro for data
riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones
riscv: kvm: use ".L" local labels in assembly when applicable

arch/riscv/kernel/copy-unaligned.S | 8 +--
arch/riscv/kernel/entry.S | 19 +++----
arch/riscv/kernel/fpu.S | 8 +--
arch/riscv/kernel/head.S | 30 +++++-----
arch/riscv/kernel/hibernate-asm.S | 12 ++--
arch/riscv/kernel/mcount-dyn.S | 20 +++----
arch/riscv/kernel/mcount.S | 18 +++---
arch/riscv/kernel/probes/rethook_trampoline.S | 4 +-
arch/riscv/kernel/suspend_entry.S | 4 +-
arch/riscv/kernel/vdso/flush_icache.S | 4 +-
arch/riscv/kernel/vdso/getcpu.S | 4 +-
arch/riscv/kernel/vdso/rt_sigreturn.S | 4 +-
arch/riscv/kernel/vdso/sys_hwprobe.S | 4 +-
arch/riscv/kvm/vcpu_switch.S | 32 +++++------
arch/riscv/lib/memcpy.S | 6 +-
arch/riscv/lib/memmove.S | 57 +++++++++----------
arch/riscv/lib/memset.S | 6 +-
arch/riscv/lib/uaccess.S | 11 ++--
arch/riscv/purgatory/entry.S | 16 ++----
19 files changed, 124 insertions(+), 143 deletions(-)

--
2.42.0


2023-10-24 13:35:57

by Clément Léger

Subject: [PATCH v2 4/5] riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones

The ENTRY()/END()/WEAK() macros are deprecated and we should make use of
the new SYM_*() macros [1] for better annotation of symbols. Replace the
deprecated ones with the new ones and fix incorrect usage of
END()/ENDPROC() to correctly describe the symbols.

[1] https://docs.kernel.org/core-api/asm-annotations.html
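
The FUNC/CODE distinction is the incorrect usage being fixed here:
SYM_FUNC_*() is for regular C-ABI functions, while SYM_CODE_*() is for
code with special calling conventions, such as __kvm_riscv_unpriv_trap
below, which returns with sret. A minimal sketch of that pattern (the
symbol name is a placeholder):

SYM_CODE_START(unpriv_trap_stub)	/* not callable from C */
	csrr	t0, CSR_SEPC
	addi	t0, t0, 4		/* skip the 4-byte faulting instruction */
	csrw	CSR_SEPC, t0
	sret				/* trap return, not a C-style ret */
SYM_CODE_END(unpriv_trap_stub)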

Signed-off-by: Clément Léger <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
arch/riscv/kvm/vcpu_switch.S | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
index d74df8eb4d71..8b18473780ac 100644
--- a/arch/riscv/kvm/vcpu_switch.S
+++ b/arch/riscv/kvm/vcpu_switch.S
@@ -15,7 +15,7 @@
.altmacro
.option norelax

-ENTRY(__kvm_riscv_switch_to)
+SYM_FUNC_START(__kvm_riscv_switch_to)
/* Save Host GPRs (except A0 and T0-T6) */
REG_S ra, (KVM_ARCH_HOST_RA)(a0)
REG_S sp, (KVM_ARCH_HOST_SP)(a0)
@@ -208,9 +208,9 @@ __kvm_switch_return:

/* Return to C code */
ret
-ENDPROC(__kvm_riscv_switch_to)
+SYM_FUNC_END(__kvm_riscv_switch_to)

-ENTRY(__kvm_riscv_unpriv_trap)
+SYM_CODE_START(__kvm_riscv_unpriv_trap)
/*
* We assume that faulting unpriv load/store instruction is
* 4-byte long and blindly increment SEPC by 4.
@@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
csrr a1, CSR_HTINST
REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0)
sret
-ENDPROC(__kvm_riscv_unpriv_trap)
+SYM_CODE_END(__kvm_riscv_unpriv_trap)

#ifdef CONFIG_FPU
- .align 3
- .global __kvm_riscv_fp_f_save
-__kvm_riscv_fp_f_save:
+SYM_FUNC_START(__kvm_riscv_fp_f_save)
csrr t2, CSR_SSTATUS
li t1, SR_FS
csrs CSR_SSTATUS, t1
@@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
sw t0, KVM_ARCH_FP_F_FCSR(a0)
csrw CSR_SSTATUS, t2
ret
+SYM_FUNC_END(__kvm_riscv_fp_f_save)

- .align 3
- .global __kvm_riscv_fp_d_save
-__kvm_riscv_fp_d_save:
+SYM_FUNC_START(__kvm_riscv_fp_d_save)
csrr t2, CSR_SSTATUS
li t1, SR_FS
csrs CSR_SSTATUS, t1
@@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
sw t0, KVM_ARCH_FP_D_FCSR(a0)
csrw CSR_SSTATUS, t2
ret
+SYM_FUNC_END(__kvm_riscv_fp_d_save)

- .align 3
- .global __kvm_riscv_fp_f_restore
-__kvm_riscv_fp_f_restore:
+SYM_FUNC_START(__kvm_riscv_fp_f_restore)
csrr t2, CSR_SSTATUS
li t1, SR_FS
lw t0, KVM_ARCH_FP_F_FCSR(a0)
@@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
fscsr t0
csrw CSR_SSTATUS, t2
ret
+SYM_FUNC_END(__kvm_riscv_fp_f_restore)

- .align 3
- .global __kvm_riscv_fp_d_restore
-__kvm_riscv_fp_d_restore:
+SYM_FUNC_START(__kvm_riscv_fp_d_restore)
csrr t2, CSR_SSTATUS
li t1, SR_FS
lw t0, KVM_ARCH_FP_D_FCSR(a0)
@@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
fscsr t0
csrw CSR_SSTATUS, t2
ret
+SYM_FUNC_END(__kvm_riscv_fp_d_restore)
#endif
--
2.42.0

2023-10-24 13:37:14

by Clément Léger

Subject: [PATCH v2 5/5] riscv: kvm: use ".L" local labels in assembly when applicable

For the sake of coherency, use local labels in assembly when
applicable. This also avoids confusing kprobes when applying a probe,
since the size of a function is computed by checking where the next
visible symbol is located. Intermediate visible labels can make the
computed function size much shorter than expected, causing kprobes to
fail when applied at the specified offset.
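
A sketch of the failure mode (foo and done are placeholder names):
kallsyms derives a function's size from the distance to the next visible
symbol, so a non-local label inside the body truncates it:

SYM_FUNC_START(foo)
	beqz	a0, done
	li	a0, 1
done:		/* emitted as a visible symbol: foo's computed size ends here */
	ret
SYM_FUNC_END(foo)

Renaming done to .Ldone keeps it out of the symbol table, so the derived
size covers the whole function and kprobes can resolve offsets anywhere
within it.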

Signed-off-by: Clément Léger <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
arch/riscv/kvm/vcpu_switch.S | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
index 8b18473780ac..0c26189aa01c 100644
--- a/arch/riscv/kvm/vcpu_switch.S
+++ b/arch/riscv/kvm/vcpu_switch.S
@@ -45,7 +45,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
- la t4, __kvm_switch_return
+ la t4, .Lkvm_switch_return
REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)

/* Save Host and Restore Guest SSTATUS */
@@ -113,7 +113,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)

/* Back to Host */
.align 2
-__kvm_switch_return:
+.Lkvm_switch_return:
/* Swap Guest A0 with SSCRATCH */
csrrw a0, CSR_SSCRATCH, a0

--
2.42.0

2023-10-24 13:37:29

by Clément Léger

Subject: [PATCH v2 1/5] riscv: use ".L" local labels in assembly when applicable

For the sake of coherency, use local labels in assembly when
applicable. This also avoids confusing kprobes when applying a probe,
since the size of a function is computed by checking where the next
visible symbol is located. Intermediate visible labels can make the
computed function size much shorter than expected, causing kprobes to
fail when applied at the specified offset.

Signed-off-by: Clément Léger <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
arch/riscv/kernel/entry.S | 10 +++----
arch/riscv/kernel/head.S | 18 ++++++-------
arch/riscv/kernel/mcount.S | 10 +++----
arch/riscv/lib/memmove.S | 54 +++++++++++++++++++-------------------
4 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 143a2bb3e697..64ac0dd6176b 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -21,9 +21,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
- bnez tp, _save_context
+ bnez tp, .Lsave_context

-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)

@@ -35,7 +35,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

-_save_context:
+.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
@@ -205,10 +205,10 @@ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)

- la ra, restore_caller_reg
+ la ra, .Lrestore_caller_reg
tail get_overflow_stack

-restore_caller_reg:
+.Lrestore_caller_reg:
//save per-cpu overflow stack
REG_S a0, -8(sp)
//restore caller register from shadow_stack
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 3710ea5d160f..7e1b83f18a50 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -168,12 +168,12 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
- call setup_trap_vector
+ call .Lsetup_trap_vector
tail smp_callin
#endif /* CONFIG_SMP */

.align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
@@ -210,7 +210,7 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
- la a0, pmp_done
+ la a0, .Lpmp_done
csrw CSR_TVEC, a0

li a0, -1
@@ -218,7 +218,7 @@ ENTRY(_start_kernel)
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
-pmp_done:
+.Lpmp_done:

/*
* The hartid in a0 is expected later on, and we have no firmware
@@ -282,12 +282,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
- ble a4, a3, clear_bss_done
-clear_bss:
+ ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
- blt a3, a4, clear_bss
-clear_bss_done:
+ blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
@@ -311,7 +311,7 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */

- call setup_trap_vector
+ call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8818a8fa9ff3..ab4dd0594fe7 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -85,16 +85,16 @@ ENTRY(MCOUNT_NAME)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
REG_L t1, 0(t0)
- bne t1, t4, do_ftrace_graph_caller
+ bne t1, t4, .Ldo_ftrace_graph_caller

la t3, ftrace_graph_entry
REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
- bne t2, t6, do_ftrace_graph_caller
+ bne t2, t6, .Ldo_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
REG_L t5, 0(t3)
- bne t5, t4, do_trace
+ bne t5, t4, .Ldo_trace
ret

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
* A pseudo representation for the function graph tracer:
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
* A pseudo representation for the function tracer:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
-do_trace:
+.Ldo_trace:
REG_L a1, -SZREG(s0)
mv a0, ra

diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index 838ff2022fe3..1930b388c3a0 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
*/

/* Return if nothing to do */
- beq a0, a1, return_from_memmove
- beqz a2, return_from_memmove
+ beq a0, a1, .Lreturn_from_memmove
+ beqz a2, .Lreturn_from_memmove

/*
* Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
* small enough not to bother.
*/
andi t0, a2, -(2 * SZREG)
- beqz t0, byte_copy
+ beqz t0, .Lbyte_copy

/*
* Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
*/
xor t0, a0, a1
andi t1, t0, (SZREG - 1)
- beqz t1, coaligned_copy
+ beqz t1, .Lcoaligned_copy
/* Fall through to misaligned fixup copy */

-misaligned_fixup_copy:
- bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+ bltu a1, a0, .Lmisaligned_fixup_copy_reverse

-misaligned_fixup_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward

andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
mv t3, t6 /* Fix the dest pointer in case the loop was broken */

add a1, t3, a5 /* Restore the src pointer */
- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */

-misaligned_fixup_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse

andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
mv t4, t5 /* Fix the dest pointer in case the loop was broken */

add a4, t4, a5 /* Restore the src pointer */
- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */

/*
* Simple copy loops for SZREG co-aligned memory locations.
* These also make calls to do byte copies for any unaligned
* data at their terminations.
*/
-coaligned_copy:
- bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+ bltu a1, a0, .Lcoaligned_copy_reverse

-coaligned_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward

1:
REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
REG_S t1, (-1 * SZREG)(t3)
bne t3, t6, 1b

- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */

-coaligned_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse

1:
REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
REG_S t1, ( 0 * SZREG)(t4)
bne t4, t5, 1b

- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */

/*
* These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
* up from where they were left and we avoid code duplication
* without any overhead except the call in and return jumps.
*/
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
beq t3, t5, 2f
1:
lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */

-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
beq t4, t6, 2f
1:
lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
* These will byte copy until they reach the end of data to copy.
* At that point, they will call to return from memmove.
*/
-byte_copy:
- bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+ bltu a1, a0, .Lbyte_copy_reverse

-byte_copy_forward:
+.Lbyte_copy_forward:
beq t3, t4, 2f
1:
lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
2:
ret

-byte_copy_reverse:
+.Lbyte_copy_reverse:
beq t4, t3, 2f
1:
lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
bne t4, t3, 1b
2:

-return_from_memmove:
+.Lreturn_from_memmove:
ret

SYM_FUNC_END(memmove)
--
2.42.0

2023-10-24 13:38:12

by Clément Léger

Subject: [PATCH v2 2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones

The ENTRY()/END()/WEAK() macros are deprecated and we should make use of
the new SYM_*() macros [1] for better annotation of symbols. Replace the
deprecated ones with the new ones and fix incorrect usage of
END()/ENDPROC() to correctly describe the symbols.

[1] https://docs.kernel.org/core-api/asm-annotations.html
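
One non-mechanical case is the weak-symbol pattern: the old code opened
a second, weak entry point inside the function with WEAK(), while the
new scheme defines a single function and declares the weak name as an
explicit alias, as in the memcpy hunk below:

/* old pattern: a weak second entry point inside the function */
ENTRY(__memcpy)
WEAK(memcpy)
	/* copy body */
	ret
END(__memcpy)

/* new pattern: one function, one explicit weak alias */
SYM_FUNC_START(__memcpy)
	/* copy body */
	ret
SYM_FUNC_END(__memcpy)
SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)

This keeps memcpy overridable while giving both symbols a well-defined
type and size.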

Signed-off-by: Clément Léger <[email protected]>
---
arch/riscv/kernel/copy-unaligned.S | 8 ++++----
arch/riscv/kernel/fpu.S | 8 ++++----
arch/riscv/kernel/head.S | 12 +++++------
arch/riscv/kernel/hibernate-asm.S | 12 +++++------
arch/riscv/kernel/mcount-dyn.S | 20 ++++++++-----------
arch/riscv/kernel/mcount.S | 8 ++++----
arch/riscv/kernel/probes/rethook_trampoline.S | 4 ++--
arch/riscv/kernel/suspend_entry.S | 4 ++--
arch/riscv/kernel/vdso/flush_icache.S | 4 ++--
arch/riscv/kernel/vdso/getcpu.S | 4 ++--
arch/riscv/kernel/vdso/rt_sigreturn.S | 4 ++--
arch/riscv/kernel/vdso/sys_hwprobe.S | 4 ++--
arch/riscv/lib/memcpy.S | 6 +++---
arch/riscv/lib/memmove.S | 3 +--
arch/riscv/lib/memset.S | 6 +++---
arch/riscv/lib/uaccess.S | 11 +++++-----
arch/riscv/purgatory/entry.S | 16 ++++-----------
17 files changed, 60 insertions(+), 74 deletions(-)

diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
index cfdecfbaad62..2b3d9398c113 100644
--- a/arch/riscv/kernel/copy-unaligned.S
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -9,7 +9,7 @@
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
andi a4, a2, ~((8*SZREG)-1)
beqz a4, 2f
add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)

2:
ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)

/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)

2:
ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index dd2205473de7..6201afcd6b45 100644
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -19,7 +19,7 @@
#include <asm/csr.h>
#include <asm/asm-offsets.h>

-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)

-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -103,4 +103,4 @@ ENTRY(__fstate_restore)
fscsr t0
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 7e1b83f18a50..56f78ec304c6 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -18,7 +18,7 @@
#include "efi-header.S"

__HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
/*
* Image header expected by Linux boot-loaders. The image header data
* structure is described in asm/image.h.
@@ -191,9 +191,9 @@ secondary_start_sbi:
wfi
j .Lsecondary_park

-END(_start)
+SYM_CODE_END(_start)

-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
@@ -353,10 +353,10 @@ ENTRY(_start_kernel)
tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

-END(_start_kernel)
+SYM_CODE_END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
li sp, 0
li gp, 0
li tp, 0
@@ -454,5 +454,5 @@ ENTRY(reset_regs)
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S
index d698dd7df637..d040dcf4add4 100644
--- a/arch/riscv/kernel/hibernate-asm.S
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -21,7 +21,7 @@
*
* Always returns 0
*/
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
mv a0, zero

ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)

/*
* Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
REG_L a1, relocated_restore_code

jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)

/*
* The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
bnez s4, .Lcopy

jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 669b8697aa38..58dd96a2a153 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -82,7 +82,7 @@
.endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
SAVE_ABI

addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
mv a1, ra
mv a3, sp

-ftrace_call:
- .global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_call:
- .global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ABI
jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
SAVE_ALL

addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
mv a1, ra
mv a3, sp

-ftrace_regs_call:
- .global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_regs_call:
- .global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#endif

RESTORE_ALL
jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index ab4dd0594fe7..b4dd9ed6849e 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
ret
SYM_FUNC_END(ftrace_stub_graph)

-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
/*
* On implementing the frame point test, the ideal way is to compare the
* s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
mv a2, a0
RESTORE_RET_ABI_STATE
jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif

#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/probes/rethook_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S
index 21bac92a170a..f2cd83d9b0f0 100644
--- a/arch/riscv/kernel/probes/rethook_trampoline.S
+++ b/arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,7 +75,7 @@
REG_L x31, PT_T6(sp)
.endm

-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs

@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
addi sp, sp, PT_SIZE_ON_STACK

ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index f7960c7c5f9e..a59c4c903696 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -16,7 +16,7 @@
.altmacro
.option norelax

-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)

/* Return to C code */
ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)

SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index 82f97d67c23e..8f884227e8bc 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -8,7 +8,7 @@

.text
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
.cfi_startproc
#ifdef CONFIG_SMP
li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
#endif
ret
.cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
index bb0c05e2ffba..9c1bd531907f 100644
--- a/arch/riscv/kernel/vdso/getcpu.S
+++ b/arch/riscv/kernel/vdso/getcpu.S
@@ -8,11 +8,11 @@

.text
/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_getcpu
ecall
ret
.cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
index 10438c7c626a..3dc022aa8931 100644
--- a/arch/riscv/kernel/vdso/rt_sigreturn.S
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -7,10 +7,10 @@
#include <asm/unistd.h>

.text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
li a7, __NR_rt_sigreturn
ecall
.cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S
index 4e704146c77a..77e57f830521 100644
--- a/arch/riscv/kernel/vdso/sys_hwprobe.S
+++ b/arch/riscv/kernel/vdso/sys_hwprobe.S
@@ -5,11 +5,11 @@
#include <asm/unistd.h>

.text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
.cfi_startproc
li a7, __NR_riscv_hwprobe
ecall
ret

.cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 1a40d01a9543..44e009ec5fef 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,8 +7,7 @@
#include <asm/asm.h>

/* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
move t6, a0 /* Preserve return value */

/* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
bltu a1, a3, 5b
6:
ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index 1930b388c3a0..cb3e2e7ef0ba 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -7,7 +7,6 @@
#include <asm/asm.h>

SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
/*
* Returns
* a0 - dest
@@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove)
.Lreturn_from_memmove:
ret

-SYM_FUNC_END(memmove)
SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
SYM_FUNC_ALIAS(__pi_memmove, __memmove)
SYM_FUNC_ALIAS(__pi___memmove, __memmove)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 34c5360c6705..35f358e70bdb 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -8,8 +8,7 @@
#include <asm/asm.h>

/* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
move t0, a0 /* Preserve return value */

/* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
bltu t0, a3, 5b
6:
ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 09b47ebacf2e..3ab438f30d13 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -10,8 +10,7 @@
_asm_extable 100b, \lbl
.endm

-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)

/* Enable access to user memory */
li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
csrc CSR_STATUS, t6
sub a0, t5, a0
ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)


-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)

/* Enable access to user memory */
li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
csrc CSR_STATUS, t6
sub a0, a3, a0
ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 0194f4554130..5bcf3af903da 100644
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -7,15 +7,11 @@
* Author: Li Zhengyu ([email protected])
*
*/
-
-.macro size, sym:req
- .size \sym, . - \sym
-.endm
+#include <linux/linkage.h>

.text

-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)

lla sp, .Lstack
mv s0, a0 /* The hartid of the current hart */
@@ -28,8 +24,7 @@ purgatory_start:
mv a1, s1
ld a2, riscv_kernel_entry
jr a2
-
-size purgatory_start
+SYM_CODE_END(purgatory_start)

.align 4
.rept 256
@@ -39,9 +34,6 @@ size purgatory_start

.data

-.globl riscv_kernel_entry
-riscv_kernel_entry:
- .quad 0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)

.end
--
2.42.0

2023-10-24 15:27:00

by Andrew Jones

Subject: Re: [PATCH v2 2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones

On Tue, Oct 24, 2023 at 03:26:52PM +0200, Clément Léger wrote:
...
> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
> index 09b47ebacf2e..3ab438f30d13 100644
> --- a/arch/riscv/lib/uaccess.S
> +++ b/arch/riscv/lib/uaccess.S
> @@ -10,8 +10,7 @@
> _asm_extable 100b, \lbl
> .endm
>
> -ENTRY(__asm_copy_to_user)
> -ENTRY(__asm_copy_from_user)
> +SYM_FUNC_START(__asm_copy_to_user)
>
> /* Enable access to user memory */
> li t6, SR_SUM
> @@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
> csrc CSR_STATUS, t6
> sub a0, t5, a0
> ret
> -ENDPROC(__asm_copy_to_user)
> -ENDPROC(__asm_copy_from_user)
> +SYM_FUNC_END(__asm_copy_to_user)
> EXPORT_SYMBOL(__asm_copy_to_user)
> +SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
> EXPORT_SYMBOL(__asm_copy_from_user)

I didn't see any comment about the sharing of debug info between the
from and to functions. Assuming it isn't confusing in some way, then

Reviewed-by: Andrew Jones <[email protected]>

Thanks,
drew

2023-10-24 18:04:33

by Clément Léger

Subject: Re: [PATCH v2 2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones



On 24/10/2023 17:23, Andrew Jones wrote:
> On Tue, Oct 24, 2023 at 03:26:52PM +0200, Clément Léger wrote:
> ...
>> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
>> index 09b47ebacf2e..3ab438f30d13 100644
>> --- a/arch/riscv/lib/uaccess.S
>> +++ b/arch/riscv/lib/uaccess.S
>> @@ -10,8 +10,7 @@
>> _asm_extable 100b, \lbl
>> .endm
>>
>> -ENTRY(__asm_copy_to_user)
>> -ENTRY(__asm_copy_from_user)
>> +SYM_FUNC_START(__asm_copy_to_user)
>>
>> /* Enable access to user memory */
>> li t6, SR_SUM
>> @@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
>> csrc CSR_STATUS, t6
>> sub a0, t5, a0
>> ret
>> -ENDPROC(__asm_copy_to_user)
>> -ENDPROC(__asm_copy_from_user)
>> +SYM_FUNC_END(__asm_copy_to_user)
>> EXPORT_SYMBOL(__asm_copy_to_user)
>> +SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
>> EXPORT_SYMBOL(__asm_copy_from_user)
>
> I didn't see any comment about the sharing of debug info between the
> from and to functions. Assuming it isn't confusing in some way, then

Hi Andrew,

I did some testing with gdb and it seems to correctly assume that
__asm_copy_to_user maps to __asm_copy_from_user for debugging. The basic
tests that I did (breakpoints, disassembly, etc.) show no sign of
problems when debugging. Were you thinking about other things specifically?

Thanks,

Clément

>
> Reviewed-by: Andrew Jones <[email protected]>
>
> Thanks,
> drew

2023-10-25 06:51:09

by Andrew Jones

Subject: Re: [PATCH v2 2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones

On Tue, Oct 24, 2023 at 08:03:52PM +0200, Clément Léger wrote:
>
>
> On 24/10/2023 17:23, Andrew Jones wrote:
> > On Tue, Oct 24, 2023 at 03:26:52PM +0200, Clément Léger wrote:
> > ...
> >> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
> >> index 09b47ebacf2e..3ab438f30d13 100644
> >> --- a/arch/riscv/lib/uaccess.S
> >> +++ b/arch/riscv/lib/uaccess.S
> >> @@ -10,8 +10,7 @@
> >> _asm_extable 100b, \lbl
> >> .endm
> >>
> >> -ENTRY(__asm_copy_to_user)
> >> -ENTRY(__asm_copy_from_user)
> >> +SYM_FUNC_START(__asm_copy_to_user)
> >>
> >> /* Enable access to user memory */
> >> li t6, SR_SUM
> >> @@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
> >> csrc CSR_STATUS, t6
> >> sub a0, t5, a0
> >> ret
> >> -ENDPROC(__asm_copy_to_user)
> >> -ENDPROC(__asm_copy_from_user)
> >> +SYM_FUNC_END(__asm_copy_to_user)
> >> EXPORT_SYMBOL(__asm_copy_to_user)
> >> +SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
> >> EXPORT_SYMBOL(__asm_copy_from_user)
> >
> > I didn't see any comment about the sharing of debug info between the
> > from and to functions. Assuming it isn't confusing in some way, then
>
> Hi Andrew,
>
> I did some testing with gdb and it seems to correctly assume that
> __asm_copy_to_user maps to __asm_copy_from_user for debugging. The basic
> tests that I did (breakpoints, disassembly, etc.) show no sign of
> problems when debugging. Were you thinking about other things specifically?

Mostly just backtrace symbols, but I suppose we can live with it, since
it wouldn't be the only weird thing in a backtrace.

Thanks,
drew

2023-10-26 07:53:50

by Clément Léger

Subject: Re: [PATCH v2 2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones



On 25/10/2023 08:50, Andrew Jones wrote:
> On Tue, Oct 24, 2023 at 08:03:52PM +0200, Clément Léger wrote:
>>
>>
>> On 24/10/2023 17:23, Andrew Jones wrote:
>>> On Tue, Oct 24, 2023 at 03:26:52PM +0200, Clément Léger wrote:
>>> ...
>>>> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
>>>> index 09b47ebacf2e..3ab438f30d13 100644
>>>> --- a/arch/riscv/lib/uaccess.S
>>>> +++ b/arch/riscv/lib/uaccess.S
>>>> @@ -10,8 +10,7 @@
>>>> _asm_extable 100b, \lbl
>>>> .endm
>>>>
>>>> -ENTRY(__asm_copy_to_user)
>>>> -ENTRY(__asm_copy_from_user)
>>>> +SYM_FUNC_START(__asm_copy_to_user)
>>>>
>>>> /* Enable access to user memory */
>>>> li t6, SR_SUM
>>>> @@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
>>>> csrc CSR_STATUS, t6
>>>> sub a0, t5, a0
>>>> ret
>>>> -ENDPROC(__asm_copy_to_user)
>>>> -ENDPROC(__asm_copy_from_user)
>>>> +SYM_FUNC_END(__asm_copy_to_user)
>>>> EXPORT_SYMBOL(__asm_copy_to_user)
>>>> +SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
>>>> EXPORT_SYMBOL(__asm_copy_from_user)
>>>
>>> I didn't see any comment about the sharing of debug info between the
>>> from and to functions. Assuming it isn't confusing in some way, then
>>
>> Hi Andrew,
>>
>> I did some testing with gdb and it seems to correctly assume that
>> __asm_copy_to_user maps to __asm_copy_from_user for debugging. The basic
>> tests that I did (breakpoints, disassembly, etc.) show no sign of
>> problems when debugging. Were you thinking about other things specifically?
>
> Mostly just backtrace symbols, but I suppose we can live with it, since
> it wouldn't be the only weird thing in a backtrace.

Oh yes, in that case, clearly, a backtrace through __asm_copy_to_user
will display __asm_copy_from_user. This is the case in gdb: if you
disassemble __asm_copy_to_user, it displays __asm_copy_from_user (which
is "quite" expected, to say the least). But I'm not sure what would
happen with the existing code, since there are two symbols mapping to
the same area of code. I'd expect the same, I guess.

Clément

>
> Thanks,
> drew

2023-11-06 17:47:57

by Palmer Dabbelt

Subject: Re: [PATCH v2 5/5] riscv: kvm: use ".L" local labels in assembly when applicable

On Tue, 24 Oct 2023 06:26:55 PDT (-0700), [email protected] wrote:
> For the sake of coherency, use local labels in assembly when
> applicable. This also avoids confusing kprobes when applying a probe,
> since the size of a function is computed by checking where the next
> visible symbol is located. Intermediate visible labels can make the
> computed function size much shorter than expected, causing kprobes to
> fail when applied at the specified offset.
>
> Signed-off-by: Clément Léger <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>
> ---
> arch/riscv/kvm/vcpu_switch.S | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
> index 8b18473780ac..0c26189aa01c 100644
> --- a/arch/riscv/kvm/vcpu_switch.S
> +++ b/arch/riscv/kvm/vcpu_switch.S
> @@ -45,7 +45,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
> REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
> REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
> REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
> - la t4, __kvm_switch_return
> + la t4, .Lkvm_switch_return
> REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)
>
> /* Save Host and Restore Guest SSTATUS */
> @@ -113,7 +113,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
>
> /* Back to Host */
> .align 2
> -__kvm_switch_return:
> +.Lkvm_switch_return:
> /* Swap Guest A0 with SSCRATCH */
> csrrw a0, CSR_SSCRATCH, a0

Acked-by: Palmer Dabbelt <[email protected]>

2023-11-06 17:48:21

by Palmer Dabbelt

Subject: Re: [PATCH v2 4/5] riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones

On Tue, 24 Oct 2023 06:26:54 PDT (-0700), [email protected] wrote:
> The ENTRY()/END()/WEAK() macros are deprecated and we should make use of
> the new SYM_*() macros [1] for better annotation of symbols. Replace the
> deprecated ones with the new ones and fix incorrect usage of
> END()/ENDPROC() to correctly describe the symbols.
>
> [1] https://docs.kernel.org/core-api/asm-annotations.html
>
> Signed-off-by: Clément Léger <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>
> ---
> arch/riscv/kvm/vcpu_switch.S | 28 ++++++++++++----------------
> 1 file changed, 12 insertions(+), 16 deletions(-)
>
> diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
> index d74df8eb4d71..8b18473780ac 100644
> --- a/arch/riscv/kvm/vcpu_switch.S
> +++ b/arch/riscv/kvm/vcpu_switch.S
> @@ -15,7 +15,7 @@
> .altmacro
> .option norelax
>
> -ENTRY(__kvm_riscv_switch_to)
> +SYM_FUNC_START(__kvm_riscv_switch_to)
> /* Save Host GPRs (except A0 and T0-T6) */
> REG_S ra, (KVM_ARCH_HOST_RA)(a0)
> REG_S sp, (KVM_ARCH_HOST_SP)(a0)
> @@ -208,9 +208,9 @@ __kvm_switch_return:
>
> /* Return to C code */
> ret
> -ENDPROC(__kvm_riscv_switch_to)
> +SYM_FUNC_END(__kvm_riscv_switch_to)
>
> -ENTRY(__kvm_riscv_unpriv_trap)
> +SYM_CODE_START(__kvm_riscv_unpriv_trap)
> /*
> * We assume that faulting unpriv load/store instruction is
> * 4-byte long and blindly increment SEPC by 4.
> @@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
> csrr a1, CSR_HTINST
> REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0)
> sret
> -ENDPROC(__kvm_riscv_unpriv_trap)
> +SYM_CODE_END(__kvm_riscv_unpriv_trap)
>
> #ifdef CONFIG_FPU
> - .align 3
> - .global __kvm_riscv_fp_f_save
> -__kvm_riscv_fp_f_save:
> +SYM_FUNC_START(__kvm_riscv_fp_f_save)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> csrs CSR_SSTATUS, t1
> @@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
> sw t0, KVM_ARCH_FP_F_FCSR(a0)
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_f_save)
>
> - .align 3
> - .global __kvm_riscv_fp_d_save
> -__kvm_riscv_fp_d_save:
> +SYM_FUNC_START(__kvm_riscv_fp_d_save)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> csrs CSR_SSTATUS, t1
> @@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
> sw t0, KVM_ARCH_FP_D_FCSR(a0)
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_d_save)
>
> - .align 3
> - .global __kvm_riscv_fp_f_restore
> -__kvm_riscv_fp_f_restore:
> +SYM_FUNC_START(__kvm_riscv_fp_f_restore)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> lw t0, KVM_ARCH_FP_F_FCSR(a0)
> @@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
> fscsr t0
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_f_restore)
>
> - .align 3
> - .global __kvm_riscv_fp_d_restore
> -__kvm_riscv_fp_d_restore:
> +SYM_FUNC_START(__kvm_riscv_fp_d_restore)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> lw t0, KVM_ARCH_FP_D_FCSR(a0)
> @@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
> fscsr t0
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_d_restore)
> #endif

Acked-by: Palmer Dabbelt <[email protected]>

Subject: Re: [PATCH v2 0/5] riscv: cleanup assembly usage of ENTRY()/END() and use local labels

Hello:

This series was applied to riscv/linux.git (for-next)
by Palmer Dabbelt <[email protected]>:

On Tue, 24 Oct 2023 15:26:50 +0200 you wrote:
> This series cleans up all ENTRY()/END() macros used in arch/riscv/ as
> well as the use of local labels. It removes the now-deprecated
> ENTRY()/END()/WEAK() macros in favor of the new SYM_*() ones, which
> provide a better understanding of what is meant to be annotated. Some
> incorrect usages of SYM_FUNC_START() are also fixed in this series by
> using the correct annotations. Finally, a few labels that were meant to
> be local have been renamed with the .L prefix so that they are not
> emitted as visible symbols.
>
> [...]

Here is the summary with links:
- [v2,1/5] riscv: use ".L" local labels in assembly when applicable
https://git.kernel.org/riscv/c/b18f7296fbfd
- [v2,2/5] riscv: Use SYM_*() assembly macros instead of deprecated ones
https://git.kernel.org/riscv/c/76329c693924
- [v2,3/5] riscv: kernel: Use correct SYM_DATA_*() macro for data
https://git.kernel.org/riscv/c/4cc0d8a3f109
- [v2,4/5] riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones
(no matching commit)
- [v2,5/5] riscv: kvm: use ".L" local labels in assembly when applicable
(no matching commit)

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html


2023-12-13 06:16:41

by Anup Patel

Subject: Re: [PATCH v2 4/5] riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones

On Tue, Oct 24, 2023 at 6:58 PM Clément Léger <[email protected]> wrote:
>
> The ENTRY()/END()/WEAK() macros are deprecated and we should make use of
> the new SYM_*() macros [1] for better annotation of symbols. Replace the
> deprecated ones with the new ones and fix incorrect usage of
> END()/ENDPROC() to correctly describe the symbols.
>
> [1] https://docs.kernel.org/core-api/asm-annotations.html
>
> Signed-off-by: Clément Léger <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>

Queued this patch for Linux-6.8

Thanks,
Anup


> ---
> arch/riscv/kvm/vcpu_switch.S | 28 ++++++++++++----------------
> 1 file changed, 12 insertions(+), 16 deletions(-)
>
> diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
> index d74df8eb4d71..8b18473780ac 100644
> --- a/arch/riscv/kvm/vcpu_switch.S
> +++ b/arch/riscv/kvm/vcpu_switch.S
> @@ -15,7 +15,7 @@
> .altmacro
> .option norelax
>
> -ENTRY(__kvm_riscv_switch_to)
> +SYM_FUNC_START(__kvm_riscv_switch_to)
> /* Save Host GPRs (except A0 and T0-T6) */
> REG_S ra, (KVM_ARCH_HOST_RA)(a0)
> REG_S sp, (KVM_ARCH_HOST_SP)(a0)
> @@ -208,9 +208,9 @@ __kvm_switch_return:
>
> /* Return to C code */
> ret
> -ENDPROC(__kvm_riscv_switch_to)
> +SYM_FUNC_END(__kvm_riscv_switch_to)
>
> -ENTRY(__kvm_riscv_unpriv_trap)
> +SYM_CODE_START(__kvm_riscv_unpriv_trap)
> /*
> * We assume that faulting unpriv load/store instruction is
> * 4-byte long and blindly increment SEPC by 4.
> @@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
> csrr a1, CSR_HTINST
> REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0)
> sret
> -ENDPROC(__kvm_riscv_unpriv_trap)
> +SYM_CODE_END(__kvm_riscv_unpriv_trap)
>
> #ifdef CONFIG_FPU
> - .align 3
> - .global __kvm_riscv_fp_f_save
> -__kvm_riscv_fp_f_save:
> +SYM_FUNC_START(__kvm_riscv_fp_f_save)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> csrs CSR_SSTATUS, t1
> @@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
> sw t0, KVM_ARCH_FP_F_FCSR(a0)
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_f_save)
>
> - .align 3
> - .global __kvm_riscv_fp_d_save
> -__kvm_riscv_fp_d_save:
> +SYM_FUNC_START(__kvm_riscv_fp_d_save)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> csrs CSR_SSTATUS, t1
> @@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
> sw t0, KVM_ARCH_FP_D_FCSR(a0)
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_d_save)
>
> - .align 3
> - .global __kvm_riscv_fp_f_restore
> -__kvm_riscv_fp_f_restore:
> +SYM_FUNC_START(__kvm_riscv_fp_f_restore)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> lw t0, KVM_ARCH_FP_F_FCSR(a0)
> @@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
> fscsr t0
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_f_restore)
>
> - .align 3
> - .global __kvm_riscv_fp_d_restore
> -__kvm_riscv_fp_d_restore:
> +SYM_FUNC_START(__kvm_riscv_fp_d_restore)
> csrr t2, CSR_SSTATUS
> li t1, SR_FS
> lw t0, KVM_ARCH_FP_D_FCSR(a0)
> @@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
> fscsr t0
> csrw CSR_SSTATUS, t2
> ret
> +SYM_FUNC_END(__kvm_riscv_fp_d_restore)
> #endif
> --
> 2.42.0
>
>
> --
> kvm-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/kvm-riscv

2023-12-13 06:16:57

by Anup Patel

Subject: Re: [PATCH v2 5/5] riscv: kvm: use ".L" local labels in assembly when applicable

On Tue, Oct 24, 2023 at 6:58 PM Clément Léger <[email protected]> wrote:
>
> For the sake of coherency, use local labels in assembly when
> applicable. This also avoids confusing kprobes when applying a probe,
> since the size of a function is computed by checking where the next
> visible symbol is located. Intermediate visible labels can make the
> computed function size much shorter than expected, causing kprobes to
> fail when applied at the specified offset.
>
> Signed-off-by: Clément Léger <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>

Queued this patch for Linux-6.8

Thanks,
Anup


> ---
> arch/riscv/kvm/vcpu_switch.S | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
> index 8b18473780ac..0c26189aa01c 100644
> --- a/arch/riscv/kvm/vcpu_switch.S
> +++ b/arch/riscv/kvm/vcpu_switch.S
> @@ -45,7 +45,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
> REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
> REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
> REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
> - la t4, __kvm_switch_return
> + la t4, .Lkvm_switch_return
> REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)
>
> /* Save Host and Restore Guest SSTATUS */
> @@ -113,7 +113,7 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
>
> /* Back to Host */
> .align 2
> -__kvm_switch_return:
> +.Lkvm_switch_return:
> /* Swap Guest A0 with SSCRATCH */
> csrrw a0, CSR_SSCRATCH, a0
>
> --
> 2.42.0
>
>
> --
> kvm-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/kvm-riscv