From: Chen Zhongjin <chenzhongjin@huawei.com>
Subject: [PATCH v5 18/33] arm64: Change symbol type annotations
Date: Wed, 22 Jun 2022 23:49:05 +0800
Message-ID: <20220622154920.95075-19-chenzhongjin@huawei.com>
In-Reply-To: <20220622154920.95075-1-chenzhongjin@huawei.com>
References: <20220622154920.95075-1-chenzhongjin@huawei.com>
X-Mailer: git-send-email 2.17.1
X-Mailing-List: linux-kernel@vger.kernel.org

Code symbols which do not follow the AArch64 procedure call standard
should be annotated with SYM_CODE_* instead of SYM_FUNC_*. Mark the
relevant symbols as generic code symbols.

Also replace the SYM_INNER_LABEL for __swpan_entry_el0 with a
standalone SYM_CODE_* block, because SYM_INNER_LABEL generates a
zero-size label which objtool cannot load correctly.

Signed-off-by: Julien Thierry
Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
---
 arch/arm64/kernel/entry.S  | 10 +++++--
 arch/arm64/kernel/head.S   | 56 +++++++++++++++++++-------------------
 arch/arm64/kernel/sleep.S  |  4 +--
 arch/arm64/kvm/hyp/entry.S |  4 +--
 4 files changed, 39 insertions(+), 35 deletions(-)
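A note on the annotation convention, for reviewers (kept below the ---
marker, so git-am drops it along with the diffstat): SYM_FUNC_* asserts
that a symbol follows the AArch64 procedure call standard, so objtool
can stack-validate it as an ordinary function, while SYM_CODE_* marks
free-form code that objtool has to treat specially. A minimal sketch of
the two annotations follows; the symbol names example_func and
example_stub are made up for illustration and do not exist in the tree:

	#include <linux/linkage.h>

	/* Follows the PCS: entered with bl, returns to its caller with ret. */
	SYM_FUNC_START(example_func)
		mov	x0, #0
		ret
	SYM_FUNC_END(example_func)

	/*
	 * Does not follow the PCS: never returns to a caller, so it is
	 * annotated as plain code rather than as a function.
	 */
	SYM_CODE_START_LOCAL(example_stub)
		b	example_func
	SYM_CODE_END(example_stub)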
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ede028dee81b..c460ba2d009d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -460,11 +460,15 @@ SYM_CODE_START_LOCAL(__swpan_entry_el1)
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
-SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
 	__uaccess_ttbr0_disable x21
 1:	ret
 SYM_CODE_END(__swpan_entry_el1)
 
+SYM_CODE_START_LOCAL(__swpan_entry_el0)
+	__uaccess_ttbr0_disable x21
+1:	ret
+SYM_CODE_END(__swpan_entry_el0)
+
 /*
  * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
  * PAN bit checking.
@@ -757,11 +761,11 @@ SYM_CODE_START_NOALIGN(tramp_vectors)
 	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
 SYM_CODE_END(tramp_vectors)
 
-SYM_CODE_START(tramp_exit_native)
+SYM_CODE_START_LOCAL(tramp_exit_native)
 	tramp_exit
 SYM_CODE_END(tramp_exit_native)
 
-SYM_CODE_START(tramp_exit_compat)
+SYM_CODE_START_LOCAL(tramp_exit_compat)
 	tramp_exit	32
 SYM_CODE_END(tramp_exit_compat)
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6a98f1a38c29..6db9c3603bd8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -259,7 +259,7 @@ SYM_CODE_END(preserve_boot_args)
  * - first few MB of the kernel linear mapping to jump to once the MMU has
  *   been enabled
  */
-SYM_FUNC_START_LOCAL(__create_page_tables)
+SYM_CODE_START_LOCAL(__create_page_tables)
 	mov	x28, lr
 
 	/*
@@ -389,7 +389,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	bl	dcache_inval_poc
 
 	ret	x28
-SYM_FUNC_END(__create_page_tables)
+SYM_CODE_END(__create_page_tables)
 
 /*
  * Initialize CPU registers with task-specific and cpu-specific context.
@@ -422,7 +422,7 @@ SYM_FUNC_END(__create_page_tables)
  *
  * x0 = __PHYS_OFFSET
  */
-SYM_FUNC_START_LOCAL(__primary_switched)
+SYM_CODE_START_LOCAL(__primary_switched)
 	adr_l	x4, init_task
 	init_cpu_task x4, x5, x6
 
@@ -467,7 +467,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
-SYM_FUNC_END(__primary_switched)
+SYM_CODE_END(__primary_switched)
 
 	.pushsection ".rodata", "a"
 SYM_DATA_START(kimage_vaddr)
@@ -493,7 +493,7 @@ EXPORT_SYMBOL(kimage_vaddr)
  * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
  * booted in EL1 or EL2 respectively.
  */
-SYM_FUNC_START(init_kernel_el)
+SYM_CODE_START(init_kernel_el)
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
 	b.eq	init_el2
@@ -557,13 +557,13 @@ __cpu_stick_to_vhe:
 	hvc	#0
 	mov	x0, #BOOT_CPU_MODE_EL2
 	ret
-SYM_FUNC_END(init_kernel_el)
+SYM_CODE_END(init_kernel_el)
 
 /*
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in w0. See arch/arm64/include/asm/virt.h for more info.
  */
-SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
+SYM_CODE_START_LOCAL(set_cpu_boot_mode_flag)
 	adr_l	x1, __boot_cpu_mode
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -572,7 +572,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	dmb	sy
 	dc	ivac, x1			// Invalidate potentially stale cache line
 	ret
-SYM_FUNC_END(set_cpu_boot_mode_flag)
+SYM_CODE_END(set_cpu_boot_mode_flag)
 
 /*
  * These values are written with the MMU off, but read with the MMU on.
@@ -606,7 +606,7 @@ SYM_DATA_END(__early_cpu_boot_status)
  * This provides a "holding pen" for platforms to hold all secondary
  * cores are held until we're ready for them to initialise.
  */
-SYM_FUNC_START(secondary_holding_pen)
+SYM_CODE_START(secondary_holding_pen)
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
@@ -618,19 +618,19 @@ pen:	ldr	x4, [x3]
 	b.eq	secondary_startup
 	wfe
 	b	pen
-SYM_FUNC_END(secondary_holding_pen)
+SYM_CODE_END(secondary_holding_pen)
 
 /*
  * Secondary entry point that jumps straight into the kernel. Only to
  * be used where CPUs are brought online dynamically by the kernel.
  */
-SYM_FUNC_START(secondary_entry)
+SYM_CODE_START(secondary_entry)
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	b	secondary_startup
-SYM_FUNC_END(secondary_entry)
+SYM_CODE_END(secondary_entry)
 
-SYM_FUNC_START_LOCAL(secondary_startup)
+SYM_CODE_START_LOCAL(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -641,9 +641,9 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
-SYM_FUNC_END(secondary_startup)
+SYM_CODE_END(secondary_startup)
 
-SYM_FUNC_START_LOCAL(__secondary_switched)
+SYM_CODE_START_LOCAL(__secondary_switched)
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
@@ -660,13 +660,13 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 
 	bl	secondary_start_kernel
 	ASM_BUG()
-SYM_FUNC_END(__secondary_switched)
+SYM_CODE_END(__secondary_switched)
 
-SYM_FUNC_START_LOCAL(__secondary_too_slow)
+SYM_CODE_START_LOCAL(__secondary_too_slow)
 	wfe
 	wfi
 	b	__secondary_too_slow
-SYM_FUNC_END(__secondary_too_slow)
+SYM_CODE_END(__secondary_too_slow)
 
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -698,7 +698,7 @@ SYM_FUNC_END(__secondary_too_slow)
  * Checks if the selected granule size is supported by the CPU.
  * If it isn't, park the CPU
  */
-SYM_FUNC_START(__enable_mmu)
+SYM_CODE_START(__enable_mmu)
 	mrs	x2, ID_AA64MMFR0_EL1
 	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
@@ -717,9 +717,9 @@ SYM_FUNC_START(__enable_mmu)
 	set_sctlr_el1	x0
 
 	ret
-SYM_FUNC_END(__enable_mmu)
+SYM_CODE_END(__enable_mmu)
 
-SYM_FUNC_START(__cpu_secondary_check52bitva)
+SYM_CODE_START_LOCAL(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l	x0, vabits_actual
 	cmp	x0, #52
@@ -737,9 +737,9 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
 
 #endif
 2:	ret
-SYM_FUNC_END(__cpu_secondary_check52bitva)
+SYM_CODE_END(__cpu_secondary_check52bitva)
 
-SYM_FUNC_START_LOCAL(__no_granule_support)
+SYM_CODE_START_LOCAL(__no_granule_support)
 	/* Indicate that this CPU can't boot and is stuck in the kernel */
 	update_early_cpu_boot_status \
 		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -747,10 +747,10 @@ SYM_FUNC_START_LOCAL(__no_granule_support)
 	wfe
 	wfi
 	b	1b
-SYM_FUNC_END(__no_granule_support)
+SYM_CODE_END(__no_granule_support)
 
 #ifdef CONFIG_RELOCATABLE
-SYM_FUNC_START_LOCAL(__relocate_kernel)
+SYM_CODE_START_LOCAL(__relocate_kernel)
 	/*
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
@@ -852,10 +852,10 @@ SYM_FUNC_START_LOCAL(__relocate_kernel)
 
 #endif
 	ret
-SYM_FUNC_END(__relocate_kernel)
+SYM_CODE_END(__relocate_kernel)
 #endif
 
-SYM_FUNC_START_LOCAL(__primary_switch)
+SYM_CODE_START_LOCAL(__primary_switch)
 #ifdef CONFIG_RANDOMIZE_BASE
 	mov	x19, x0				// preserve new SCTLR_EL1 value
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
@@ -895,4 +895,4 @@ SYM_FUNC_START_LOCAL(__primary_switch)
 	ldr	x8, =__primary_switched
 	adrp	x0, __PHYS_OFFSET
 	br	x8
-SYM_FUNC_END(__primary_switch)
+SYM_CODE_END(__primary_switch)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 4ea9392f86e0..f0087e8bcd28 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -111,7 +111,7 @@ SYM_CODE_END(cpu_resume)
 	.ltorg
 	.popsection
 
-SYM_FUNC_START(_cpu_resume)
+SYM_CODE_START(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adr_l	x8, mpidr_hash		// x8 = struct mpidr_hash virt address
 
@@ -147,4 +147,4 @@ SYM_FUNC_START(_cpu_resume)
 	ldp	x29, lr, [x29]
 	mov	x0, #0
 	ret
-SYM_FUNC_END(_cpu_resume)
+SYM_CODE_END(_cpu_resume)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 435346ea1504..045d4481c820 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -21,7 +21,7 @@
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu);
  */
-SYM_FUNC_START(__guest_enter)
+SYM_CODE_START(__guest_enter)
 	// x0: vcpu
 	// x1-x17: clobbered by macros
 	// x29: guest context
@@ -212,4 +212,4 @@ abort_guest_exit_end:
 	msr	spsr_el2, x4
 	orr	x0, x0, x5
 1:	ret
-SYM_FUNC_END(__guest_enter)
+SYM_CODE_END(__guest_enter)
-- 
2.17.1