Date: Wed, 14 Dec 2016 11:15:13 +0000
From: Piotr Gregor
To: tglx@linutronix.de
Cc: mingo@redhat.com, hpa@zytor.com, x86@kernel.org,
    virtualization@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
    jeremy@goop.org, akataria@vmware.com, rusty@rustcorp.com.au
Subject: [PATCH] arch: x86: kernel: fix unused label issue
Message-ID: <20161214111510.GA23861@westernst>

The patch_default label is used only from within
case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock) and
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted), i.e. only when
CONFIG_PARAVIRT_SPINLOCKS is defined. When CONFIG_PARAVIRT_SPINLOCKS is
not defined, no code jumps to this label and the compiler warns about an
unused label, so the label should not exist in that configuration.
Moving the #endif directive to just after the label fixes the issue.

In addition, the checkpatch script reports three errors on this file.
This commit fixes two of them. The remaining one,

  ERROR: Macros with multiple statements should be enclosed in a
  do - while loop

is most likely a false positive here: the PATCH_SITE macro deliberately
expands to a case label (plus statements) of the switch statement local
to native_patch() and is not meant to be used anywhere else.

Signed-off-by: Piotr Gregor
---
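For reference, the unused-label problem is easy to reproduce outside the
kernel tree. Below is a minimal standalone sketch, not part of the patch;
the names demo_patch, handle_default and FEATURE_X are invented here and
stand in for native_patch(), paravirt_patch_default() and
CONFIG_PARAVIRT_SPINLOCKS respectively:

/*
 * Standalone illustration of the unused label problem (all names are
 * invented; FEATURE_X stands in for CONFIG_PARAVIRT_SPINLOCKS).
 *
 * Build with:  gcc -Wunused-label -c demo.c              -> no warning
 * Move patch_default: below the #endif and rebuild without -DFEATURE_X
 * to see "label 'patch_default' defined but not used".
 */
static int handle_default(int type)
{
	return -type;
}

int demo_patch(int type)
{
	int ret;

	switch (type) {
	case 0:
		ret = 0;
		break;
#if defined(FEATURE_X)
	case 1:
		if (type & 1)
			goto patch_default;	/* only user of the label */
		ret = 1;
		break;
patch_default:		/* inside the #if block, vanishes with FEATURE_X */
#endif
	default:
		ret = handle_default(type);
		break;
	}
	return ret;
}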
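On the remaining checkpatch ERROR: the do-while(0) rule guards against
multi-statement macros misbehaving after an unbraced if. The standalone
sketch below (EMIT_TWO, HANDLE and classify are invented names, not from
this patch) contrasts a macro that genuinely needs the wrapper with a
PATCH_SITE-style macro that expands to a case label, where the wrapper
is neither needed nor sensible:

#include <stdio.h>

/*
 * The hazard the checkpatch rule guards against: without do-while(0),
 * only the first printf would be governed by an unbraced if and the
 * second would always execute.
 */
#define EMIT_TWO(a, b)				\
	do {					\
		printf("%d\n", (a));		\
		printf("%d\n", (b));		\
	} while (0)

/*
 * A PATCH_SITE-style macro: it expands to a case label plus statements,
 * so it can only ever be written directly inside one specific switch.
 * There the do-while(0) wrapper is pointless (no unbraced if can govern
 * it) and would even be harmful: the break would then terminate the
 * loop rather than the switch.
 */
#define HANDLE(n)				\
	case (n):				\
		printf("got %d\n", (n));	\
		break

static int classify(int v)
{
	switch (v) {
	HANDLE(1);
	HANDLE(2);
	default:
		printf("other\n");
		break;
	}
	return v;
}

int main(void)
{
	if (classify(1) == 1)
		EMIT_TWO(1, 2);	/* safe even without braces */
	return 0;
}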
 arch/x86/kernel/paravirt_patch_64.c | 67 +++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 33 deletions(-)

diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index f4fcf26..7ce2848 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -44,43 +44,44 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 	const unsigned char *start, *end;
 	unsigned ret;
 
-#define PATCH_SITE(ops, x)					\
-		case PARAVIRT_PATCH(ops.x):			\
-			start = start_##ops##_##x;		\
-			end = end_##ops##_##x;			\
-			goto patch_site
-	switch(type) {
-		PATCH_SITE(pv_irq_ops, restore_fl);
-		PATCH_SITE(pv_irq_ops, save_fl);
-		PATCH_SITE(pv_irq_ops, irq_enable);
-		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
-		PATCH_SITE(pv_cpu_ops, swapgs);
-		PATCH_SITE(pv_mmu_ops, read_cr2);
-		PATCH_SITE(pv_mmu_ops, read_cr3);
-		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
-		PATCH_SITE(pv_cpu_ops, wbinvd);
+#define PATCH_SITE(ops, x)					\
+	case PARAVIRT_PATCH(ops.x):				\
+		start = start_##ops##_##x;			\
+		end = end_##ops##_##x;				\
+		goto patch_site
+
+	switch (type) {
+	PATCH_SITE(pv_irq_ops, restore_fl);
+	PATCH_SITE(pv_irq_ops, save_fl);
+	PATCH_SITE(pv_irq_ops, irq_enable);
+	PATCH_SITE(pv_irq_ops, irq_disable);
+	PATCH_SITE(pv_cpu_ops, usergs_sysret64);
+	PATCH_SITE(pv_cpu_ops, swapgs);
+	PATCH_SITE(pv_mmu_ops, read_cr2);
+	PATCH_SITE(pv_mmu_ops, read_cr3);
+	PATCH_SITE(pv_mmu_ops, write_cr3);
+	PATCH_SITE(pv_mmu_ops, flush_tlb_single);
+	PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
-			if (pv_is_native_spin_unlock()) {
-				start = start_pv_lock_ops_queued_spin_unlock;
-				end = end_pv_lock_ops_queued_spin_unlock;
-				goto patch_site;
-			}
-			goto patch_default;
+	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+		if (pv_is_native_spin_unlock()) {
+			start = start_pv_lock_ops_queued_spin_unlock;
+			end = end_pv_lock_ops_queued_spin_unlock;
+			goto patch_site;
+		}
+		goto patch_default;
 
-		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
-			if (pv_is_native_vcpu_is_preempted()) {
-				start = start_pv_lock_ops_vcpu_is_preempted;
-				end = end_pv_lock_ops_vcpu_is_preempted;
-				goto patch_site;
-			}
-			goto patch_default;
-#endif
+	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+		if (pv_is_native_vcpu_is_preempted()) {
+			start = start_pv_lock_ops_vcpu_is_preempted;
+			end = end_pv_lock_ops_vcpu_is_preempted;
+			goto patch_site;
+		}
+		goto patch_default;
 
-	default:
 patch_default:
+#endif
+	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 
-- 
2.1.4