From: Andi Kleen <andi@firstfloor.org>
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, peterz@infradead.org, Andi Kleen
Subject: [PATCH 1/3] x86: Move msr accesses out of line
Date: Fri, 20 Feb 2015 17:38:55 -0800
Message-Id: <1424482737-958-1-git-send-email-andi@firstfloor.org>

From: Andi Kleen

To add tracepoints to MSR accesses we need to include
linux/tracepoint.h. Unfortunately this causes hellish include loops
with the msr inlines in asm/msr.h, which is included all over.
I tried to fix several of them, but eventually gave up.

This patch moves the MSR functions out of line. An MSR access is
typically 40-100 cycles or even slower; a call is a few cycles at
best, so the additional function call is not significant.

Kernel text size is neutral:

    text    data     bss      dec    hex filename
11852945 1671656 1822720 15347321 ea2e79 vmlinux-no-msr
11852969 1671656 1822720 15347345 ea2e91 vmlinux-msr

Signed-off-by: Andi Kleen
---
 arch/x86/include/asm/msr.h | 51 ++++----------------------------------------
 arch/x86/lib/msr.c         | 53 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 47 deletions(-)
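To make the motivation concrete, here is a minimal sketch of what the
out-of-lining enables: once the accessor bodies live in lib/msr.c, that
file can include linux/tracepoint.h without recreating the include loops
described above. Everything below is illustrative only -- the event name
is hypothetical, and the actual tracepoints are added by a later patch
in this series, not by this one:

#include <linux/tracepoint.h>

/* Hypothetical event, for illustration only. DECLARE_TRACE() also
 * needs a matching DEFINE_TRACE(do_read_msr) in one compilation unit.
 */
DECLARE_TRACE(do_read_msr,
	TP_PROTO(unsigned int msr, u64 val),
	TP_ARGS(msr, val));

unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	/* Only possible now that the body is out of line: */
	trace_do_read_msr(msr, EAX_EDX_VAL(val, low, high));
	return EAX_EDX_VAL(val, low, high);
}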
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index de36f22..99d6864 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -57,53 +57,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 #define EAX_EDX_RET(val, low, high) "=A" (val)
 #endif
 
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
-	DECLARE_ARGS(val, low, high);
-
-	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
-	return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-						      int *err)
-{
-	DECLARE_ARGS(val, low, high);
-
-	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     _ASM_EXTABLE(2b, 3b)
-		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
-		     : "c" (msr), [fault] "i" (-EIO));
-	return EAX_EDX_VAL(val, low, high);
-}
-
-static inline void native_write_msr(unsigned int msr,
-				    unsigned low, unsigned high)
-{
-	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
-}
-
-/* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
-					unsigned low, unsigned high)
-{
-	int err;
-	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     _ASM_EXTABLE(2b, 3b)
-		     : [err] "=a" (err)
-		     : "c" (msr), "0" (low), "d" (high),
-		       [fault] "i" (-EIO)
-		     : "memory");
-	return err;
-}
+extern unsigned long long native_read_msr(unsigned int msr);
+extern unsigned long long native_read_msr_safe(unsigned int msr, int *err);
+extern int native_write_msr_safe(unsigned int msr, unsigned low, unsigned high);
+extern void native_write_msr(unsigned int msr, unsigned low, unsigned high);
 
 extern unsigned long long native_read_tsc(void);

diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 4362373..7eed044 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -108,3 +108,56 @@ int msr_clear_bit(u32 msr, u8 bit)
 {
 	return __flip_bit(msr, bit, false);
 }
+
+inline unsigned long long native_read_msr(unsigned int msr)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr);
+
+inline unsigned long long native_read_msr_safe(unsigned int msr,
+					       int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), [fault] "i" (-EIO));
+	return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr_safe);
+
+inline void native_write_msr(unsigned int msr,
+			     unsigned low, unsigned high)
+{
+	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+EXPORT_SYMBOL(native_write_msr);
+
+/* Can be uninlined because referenced by paravirt */
+notrace inline int native_write_msr_safe(unsigned int msr,
+					 unsigned low, unsigned high)
+{
+	int err;
+
+	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : [err] "=a" (err)
+		     : "c" (msr), "0" (low), "d" (high),
+		       [fault] "i" (-EIO)
+		     : "memory");
+	return err;
+}
+EXPORT_SYMBOL(native_write_msr_safe);
--
1.9.3
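For completeness, a sketch of how the exported _safe variants are meant
to be consumed: the extable fixup above turns the #GP fault from a
non-existent MSR into err == -EIO, so callers can probe model-specific
registers without oopsing. The module below is hypothetical and the MSR
is chosen purely for illustration:

#include <linux/module.h>
#include <asm/msr.h>

static int __init msr_probe_example(void)
{
	unsigned long long val;
	int err;

	/* Probe an MSR that may be absent on some CPUs; a faulting
	 * RDMSR comes back as err == -EIO instead of an oops.
	 */
	val = native_read_msr_safe(MSR_IA32_PLATFORM_ID, &err);
	if (err)
		pr_info("MSR not implemented on this CPU\n");
	else
		pr_info("MSR value: %llx\n", val);
	return 0;
}
module_init(msr_probe_example);
MODULE_LICENSE("GPL");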