2010-07-17 13:03:34

by Brian Gerst

Subject: [PATCH 1/3] x86: Remove redundant K6 MSRs

MSR_K6_EFER is unused, and MSR_K6_STAR is redundant with MSR_STAR.

Signed-off-by: Brian Gerst <[email protected]>
---
 arch/x86/include/asm/msr-index.h |    2 --
 arch/x86/kvm/svm.c               |    6 +++---
 arch/x86/kvm/vmx.c               |    8 ++++----
 arch/x86/kvm/x86.c               |    2 +-
4 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 7bc36f6..f553151 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -156,8 +156,6 @@
#define MSR_K7_FID_VID_STATUS 0xc0010042

/* K6 MSRs */
-#define MSR_K6_EFER 0xc0000080
-#define MSR_K6_STAR 0xc0000081
#define MSR_K6_WHCR 0xc0000082
#define MSR_K6_UWCCR 0xc0000085
#define MSR_K6_EPMR 0xc0000086
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce438e0..24a2206 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -130,7 +130,7 @@ static struct svm_direct_access_msrs {
u32 index; /* Index of the MSR */
bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
- { .index = MSR_K6_STAR, .always = true },
+ { .index = MSR_STAR, .always = true },
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
#ifdef CONFIG_X86_64
{ .index = MSR_GS_BASE, .always = true },
@@ -2431,7 +2431,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = tsc_offset + native_read_tsc();
break;
}
- case MSR_K6_STAR:
+ case MSR_STAR:
*data = svm->vmcb->save.star;
break;
#ifdef CONFIG_X86_64
@@ -2555,7 +2555,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)

break;
}
- case MSR_K6_STAR:
+ case MSR_STAR:
svm->vmcb->save.star = data;
break;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 859a01a..51555f6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -231,14 +231,14 @@ static u64 host_efer;
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
* away by decrementing the array size.
*/
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
- MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
+ MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

@@ -1057,10 +1057,10 @@ static void setup_msrs(struct vcpu_vmx *vmx)
if (index >= 0 && vmx->rdtscp_enabled)
move_msr_up(vmx, index, save_nmsrs++);
/*
- * MSR_K6_STAR is only needed on long mode guests, and only
+ * MSR_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
- index = __find_msr_index(vmx, MSR_K6_STAR);
+ index = __find_msr_index(vmx, MSR_STAR);
if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
move_msr_up(vmx, index, save_nmsrs++);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05d571f..6127468 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -671,7 +671,7 @@ static u32 msrs_to_save[] = {
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
- MSR_K6_STAR,
+ MSR_STAR,
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
--
1.7.1.1
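
For context: the removal is safe because msr-index.h already defines the same
register indices under their architectural names. A sketch of the surviving
definitions as they appear elsewhere in the header (placement and comment
text approximate):

/* x86-64 specific MSRs, already present in asm/msr-index.h */
#define MSR_EFER		0xc0000080 /* extended feature register */
#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */

MSR_K6_EFER and MSR_K6_STAR mapped to the identical indices 0xc0000080 and
0xc0000081, so every MSR_K6_STAR user can switch to MSR_STAR with no
behavioral change.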


2010-07-17 13:03:36

by Brian Gerst

Subject: [PATCH 2/3] x86: Use symbolic MSR names

Use symbolic MSR names instead of hardcoding the MSR index.

Signed-off-by: Brian Gerst <[email protected]>
---
 arch/x86/kernel/acpi/realmode/wakeup.S |    2 +-
 arch/x86/kernel/verify_cpu_64.S        |    3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 580b4e2..28595d6 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -104,7 +104,7 @@ _start:
movl %eax, %ecx
orl %edx, %ecx
jz 1f
- movl $0xc0000080, %ecx
+ movl $MSR_EFER, %ecx
wrmsr
1:

diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
index 45b6f8a..56a8c2a 100644
--- a/arch/x86/kernel/verify_cpu_64.S
+++ b/arch/x86/kernel/verify_cpu_64.S
@@ -31,6 +31,7 @@
*/

#include <asm/cpufeature.h>
+#include <asm/msr-index.h>

verify_cpu:
pushfl # Save caller passed flags
@@ -88,7 +89,7 @@ verify_cpu_sse_test:
je verify_cpu_sse_ok
test %di,%di
jz verify_cpu_no_longmode # only try to force SSE on AMD
- movl $0xc0010015,%ecx # HWCR
+ movl $MSR_K7_HWCR,%ecx
rdmsr
btr $15,%eax # enable SSE
wrmsr
--
1.7.1.1
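
For context: the raw constants replaced here resolve to existing symbolic
names in msr-index.h (a sketch; placement and comment text approximate):

#define MSR_EFER	0xc0000080 /* extended feature register */
#define MSR_K7_HWCR	0xc0010015

wakeup.S was programming EFER through its raw index 0xc0000080, and
verify_cpu_64.S was clearing bit 15 of the K7/K8 hardware configuration
register via 0xc0010015 to enable SSE; with the new <asm/msr-index.h>
include, both sites become self-documenting.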

2010-07-17 13:03:37

by Brian Gerst

Subject: [PATCH 3/3] x86-64: Simplify loading initial_gs

Load initial_gs as two 32-bit values instead of splitting a 64-bit value.

Signed-off-by: Brian Gerst <[email protected]>
---
 arch/x86/kernel/head_64.S |    5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3d1e6f1..239046b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -234,9 +234,8 @@ ENTRY(secondary_startup_64)
* init data section till per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq initial_gs(%rip),%rax
- movq %rax,%rdx
- shrq $32,%rdx
+ movl initial_gs(%rip),%eax
+ movl initial_gs+4(%rip),%edx
wrmsr

/* esi is pointer to real mode structure with interesting info.
--
1.7.1.1
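
The two-movl form works because wrmsr consumes its 64-bit payload as two
32-bit halves in %edx:%eax; assembling the full value in %rax only to shift
the high half into %rdx was wasted work. A minimal C sketch of the calling
convention (function name hypothetical):

static inline void wrmsr_sketch(unsigned int msr, unsigned long long val)
{
	unsigned int lo = (unsigned int)val;		/* low half  -> %eax */
	unsigned int hi = (unsigned int)(val >> 32);	/* high half -> %edx */

	/* wrmsr writes %edx:%eax to the MSR selected by %ecx */
	asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}

Loading initial_gs(%rip) and initial_gs+4(%rip) straight into %eax and %edx
reads the same two halves directly from memory, saving an instruction and a
64-bit shift.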

2010-07-17 16:02:52

by Pekka Enberg

Subject: Re: [PATCH 1/3] x86: Remove redundant K6 MSRs

On Sat, Jul 17, 2010 at 4:03 PM, Brian Gerst <[email protected]> wrote:
> MSR_K6_EFER is unused, and MSR_K6_STAR is redundant with MSR_STAR.
>
> Signed-off-by: Brian Gerst <[email protected]>

Reviewed-by: Pekka Enberg <[email protected]>

2010-07-17 16:04:54

by Pekka Enberg

Subject: Re: [PATCH 2/3] x86: Use symbolic MSR names

On Sat, Jul 17, 2010 at 4:03 PM, Brian Gerst <[email protected]> wrote:
> Signed-off-by: Brian Gerst <[email protected]>

Reviewed-by: Pekka Enberg <[email protected]>

2010-07-17 16:14:41

by Pekka Enberg

Subject: Re: [PATCH 3/3] x86-64: Simplify loading initial_gs

On Sat, Jul 17, 2010 at 4:03 PM, Brian Gerst <[email protected]> wrote:
> Load initial_gs as two 32-bit values instead of splitting a 64-bit value.
>
> Signed-off-by: Brian Gerst <[email protected]>
> ---
>  arch/x86/kernel/head_64.S |    5 ++---
>  1 files changed, 2 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 3d1e6f1..239046b 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -234,9 +234,8 @@ ENTRY(secondary_startup_64)
>         * init data section till per cpu areas are set up.
>         */
>        movl    $MSR_GS_BASE,%ecx
> -      movq    initial_gs(%rip),%rax
> -      movq    %rax,%rdx
> -      shrq    $32,%rdx
> +      movl    initial_gs(%rip),%eax
> +      movl    initial_gs+4(%rip),%edx
>        wrmsr
>
>        /* esi is pointer to real mode structure with interesting info.

Reviewed-by: Pekka Enberg <[email protected]>

2010-07-22 05:01:32

by Brian Gerst

Subject: [tip:x86/asm] x86: Remove redundant K6 MSRs

Commit-ID: 8c06585d6431addadd94903843dfbcd315b42d4e
Gitweb: http://git.kernel.org/tip/8c06585d6431addadd94903843dfbcd315b42d4e
Author: Brian Gerst <[email protected]>
AuthorDate: Sat, 17 Jul 2010 09:03:26 -0400
Committer: H. Peter Anvin <[email protected]>
CommitDate: Wed, 21 Jul 2010 21:23:05 -0700

x86: Remove redundant K6 MSRs

MSR_K6_EFER is unused, and MSR_K6_STAR is redundant with MSR_STAR.

Signed-off-by: Brian Gerst <[email protected]>
LKML-Reference: <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
 arch/x86/include/asm/msr-index.h |    2 --
 arch/x86/kvm/svm.c               |    6 +++---
 arch/x86/kvm/vmx.c               |    8 ++++----
 arch/x86/kvm/x86.c               |    2 +-
4 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 8c7ae43..6068e0e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -159,8 +159,6 @@
#define MSR_K7_FID_VID_STATUS 0xc0010042

/* K6 MSRs */
-#define MSR_K6_EFER 0xc0000080
-#define MSR_K6_STAR 0xc0000081
#define MSR_K6_WHCR 0xc0000082
#define MSR_K6_UWCCR 0xc0000085
#define MSR_K6_EPMR 0xc0000086
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce438e0..24a2206 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -130,7 +130,7 @@ static struct svm_direct_access_msrs {
u32 index; /* Index of the MSR */
bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
- { .index = MSR_K6_STAR, .always = true },
+ { .index = MSR_STAR, .always = true },
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
#ifdef CONFIG_X86_64
{ .index = MSR_GS_BASE, .always = true },
@@ -2431,7 +2431,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = tsc_offset + native_read_tsc();
break;
}
- case MSR_K6_STAR:
+ case MSR_STAR:
*data = svm->vmcb->save.star;
break;
#ifdef CONFIG_X86_64
@@ -2555,7 +2555,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)

break;
}
- case MSR_K6_STAR:
+ case MSR_STAR:
svm->vmcb->save.star = data;
break;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee03679..b42ad25 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -231,14 +231,14 @@ static u64 host_efer;
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
* away by decrementing the array size.
*/
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
- MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
+ MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

@@ -1057,10 +1057,10 @@ static void setup_msrs(struct vcpu_vmx *vmx)
if (index >= 0 && vmx->rdtscp_enabled)
move_msr_up(vmx, index, save_nmsrs++);
/*
- * MSR_K6_STAR is only needed on long mode guests, and only
+ * MSR_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
- index = __find_msr_index(vmx, MSR_K6_STAR);
+ index = __find_msr_index(vmx, MSR_STAR);
if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
move_msr_up(vmx, index, save_nmsrs++);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05d571f..6127468 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -671,7 +671,7 @@ static u32 msrs_to_save[] = {
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
- MSR_K6_STAR,
+ MSR_STAR,
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif

2010-07-22 05:01:51

by Brian Gerst

Subject: [tip:x86/asm] x86: Use symbolic MSR names

Commit-ID: cfaa71ee9794472598d3966c3315cd6bd8f953d3
Gitweb: http://git.kernel.org/tip/cfaa71ee9794472598d3966c3315cd6bd8f953d3
Author: Brian Gerst <[email protected]>
AuthorDate: Sat, 17 Jul 2010 09:03:27 -0400
Committer: H. Peter Anvin <[email protected]>
CommitDate: Wed, 21 Jul 2010 21:23:40 -0700

x86: Use symbolic MSR names

Use symbolic MSR names instead of hardcoding the MSR index.

Signed-off-by: Brian Gerst <[email protected]>
LKML-Reference: <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
 arch/x86/kernel/acpi/realmode/wakeup.S |    2 +-
 arch/x86/kernel/verify_cpu_64.S        |    3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 580b4e2..28595d6 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -104,7 +104,7 @@ _start:
movl %eax, %ecx
orl %edx, %ecx
jz 1f
- movl $0xc0000080, %ecx
+ movl $MSR_EFER, %ecx
wrmsr
1:

diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
index 45b6f8a..56a8c2a 100644
--- a/arch/x86/kernel/verify_cpu_64.S
+++ b/arch/x86/kernel/verify_cpu_64.S
@@ -31,6 +31,7 @@
*/

#include <asm/cpufeature.h>
+#include <asm/msr-index.h>

verify_cpu:
pushfl # Save caller passed flags
@@ -88,7 +89,7 @@ verify_cpu_sse_test:
je verify_cpu_sse_ok
test %di,%di
jz verify_cpu_no_longmode # only try to force SSE on AMD
- movl $0xc0010015,%ecx # HWCR
+ movl $MSR_K7_HWCR,%ecx
rdmsr
btr $15,%eax # enable SSE
wrmsr

2010-07-22 05:02:06

by Brian Gerst

Subject: [tip:x86/asm] x86-64: Simplify loading initial_gs

Commit-ID: 650fb4393dff543bc980d361555c489fbdeed088
Gitweb: http://git.kernel.org/tip/650fb4393dff543bc980d361555c489fbdeed088
Author: Brian Gerst <[email protected]>
AuthorDate: Sat, 17 Jul 2010 09:03:28 -0400
Committer: H. Peter Anvin <[email protected]>
CommitDate: Wed, 21 Jul 2010 21:23:51 -0700

x86-64: Simplify loading initial_gs

Load initial_gs as two 32-bit values instead of splitting a 64-bit value.

Signed-off-by: Brian Gerst <[email protected]>
LKML-Reference: <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
 arch/x86/kernel/head_64.S |    5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3d1e6f1..239046b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -234,9 +234,8 @@ ENTRY(secondary_startup_64)
* init data section till per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq initial_gs(%rip),%rax
- movq %rax,%rdx
- shrq $32,%rdx
+ movl initial_gs(%rip),%eax
+ movl initial_gs+4(%rip),%edx
wrmsr

/* esi is pointer to real mode structure with interesting info.