Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1752572AbcDBOCW (ORCPT );
	Sat, 2 Apr 2016 10:02:22 -0400
Received: from mail.kernel.org ([198.145.29.136]:59166 "EHLO mail.kernel.org"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752218AbcDBOCQ
	(ORCPT ); Sat, 2 Apr 2016 10:02:16 -0400
From: Andy Lutomirski
To: X86 ML
Cc: Paolo Bonzini, Peter Zijlstra, KVM list, Arjan van de Ven, xen-devel,
	linux-kernel@vger.kernel.org, Linus Torvalds, Andrew Morton,
	Borislav Petkov, Andy Lutomirski
Subject: [PATCH v5 7/9] x86/paravirt: Add paravirt_{read,write}_msr
Date: Sat, 2 Apr 2016 07:01:38 -0700
Message-Id: <880eebc5dcd2ad9f310d41345f82061ea500e9fa.1459605520.git.luto@kernel.org>
X-Mailer: git-send-email 2.5.5
In-Reply-To: 
References: 
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 4421
Lines: 134

This adds paravirt hooks for unsafe MSR access.  On native, they call
native_{read,write}_msr.  On Xen, they use xen_{read,write}_msr_safe.

Nothing uses them yet for ease of bisection.  The next patch will use
them in rdmsrl, wrmsrl, etc.

I intentionally didn't make them warn on #GP on Xen.  I think that
should be done separately by the Xen maintainers.

Signed-off-by: Andy Lutomirski
---
 arch/x86/include/asm/msr.h            |  5 +++--
 arch/x86/include/asm/paravirt.h       | 11 +++++++++++
 arch/x86/include/asm/paravirt_types.h | 10 ++++++++--
 arch/x86/kernel/paravirt.c            |  2 ++
 arch/x86/xen/enlighten.c              | 23 +++++++++++++++++++++++
 5 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 1487054a1a70..13da359881d7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -119,8 +119,9 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void native_write_msr(unsigned int msr,
-				    unsigned low, unsigned high)
+/* Can be uninlined because referenced by paravirt */
+notrace static inline void native_write_msr(unsigned int msr,
+					    unsigned low, unsigned high)
 {
 	asm volatile("1: wrmsr\n"
 		     "2:\n"
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2e49228ed9a3..68297d87e85c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -129,6 +129,17 @@ static inline void wbinvd(void)
 
 #define get_kernel_rpl() (pv_info.kernel_rpl)
 
+static inline u64 paravirt_read_msr(unsigned msr)
+{
+	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
+}
+
+static inline void paravirt_write_msr(unsigned msr,
+				      unsigned low, unsigned high)
+{
+	return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+}
+
 static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 5a06cccd36f0..b85aec45a1b8 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -155,8 +155,14 @@ struct pv_cpu_ops {
 	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
 		      unsigned int *ecx, unsigned int *edx);
 
-	/* MSR operations.
-	   err = 0/-EIO.  wrmsr returns 0/-EIO. */
+	/* Unsafe MSR operations.  These will warn or panic on failure. */
+	u64 (*read_msr)(unsigned int msr);
+	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+
+	/*
+	 * Safe MSR operations.
+	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+	 */
 	u64 (*read_msr_safe)(unsigned int msr, int *err);
 	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8aad95478ae5..f9583917c7c4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -339,6 +339,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.write_cr8 = native_write_cr8,
 #endif
 	.wbinvd = native_wbinvd,
+	.read_msr = native_read_msr,
+	.write_msr = native_write_msr,
 	.read_msr_safe = native_read_msr_safe,
 	.write_msr_safe = native_write_msr_safe,
 	.read_pmc = native_read_pmc,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ff2df20d0067..548cd3e02b9e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1092,6 +1092,26 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 	return ret;
 }
 
+static u64 xen_read_msr(unsigned int msr)
+{
+	/*
+	 * This will silently swallow a #GP from RDMSR.  It may be worth
+	 * changing that.
+	 */
+	int err;
+
+	return xen_read_msr_safe(msr, &err);
+}
+
+static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+{
+	/*
+	 * This will silently swallow a #GP from WRMSR.  It may be worth
+	 * changing that.
+	 */
+	xen_write_msr_safe(msr, low, high);
+}
+
 void xen_setup_shared_info(void)
 {
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1222,6 +1242,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
 	.wbinvd = native_wbinvd,
 
+	.read_msr = xen_read_msr,
+	.write_msr = xen_write_msr,
+
 	.read_msr_safe = xen_read_msr_safe,
 	.write_msr_safe = xen_write_msr_safe,
 
-- 
2.5.5
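
[Editor's note, not part of the patch: the commit message says the next
patch routes rdmsrl, wrmsrl, etc. through these hooks.  Below is a
minimal sketch of what that wiring could look like under
CONFIG_PARAVIRT; the macro bodies are illustrative assumptions, not the
actual follow-up patch.]

/*
 * Illustrative sketch only.  Assumes the CONFIG_PARAVIRT variants of the
 * rdmsr/wrmsr family in arch/x86/include/asm/paravirt.h get redirected
 * to the paravirt_{read,write}_msr hooks added by this patch.
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 _l = paravirt_read_msr(msr);			\
	(low) = (u32)_l;					\
	(high) = (u32)(_l >> 32);				\
} while (0)

#define rdmsrl(msr, val)					\
	((val) = paravirt_read_msr(msr))

#define wrmsr(msr, low, high)					\
	paravirt_write_msr(msr, low, high)

static inline void wrmsrl(unsigned msr, u64 val)
{
	/* Split the 64-bit value into the low/high halves WRMSR expects. */
	wrmsr(msr, (u32)val, (u32)(val >> 32));
}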