From: Vitaly Kuznetsov
To: xen-devel@lists.xenproject.org
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, Boris Ostrovsky, Juergen Gross, Andrew Jones
Subject: [PATCH v2 05/21] x86/xen: split off enlighten_hvm.c
Date: Thu, 2 Mar 2017 18:53:41 +0100
Message-Id: <20170302175357.8222-6-vkuznets@redhat.com>
In-Reply-To: <20170302175357.8222-1-vkuznets@redhat.com>
References: <20170302175357.8222-1-vkuznets@redhat.com>

Move the PVHVM-related code to enlighten_hvm.c. Three functions are
shared between the PV and HVM paths: xen_cpuhp_setup(), xen_reboot()
and xen_emergency_restart(). Drop the static qualifier from them and
declare them in xen-ops.h; they will move to common code once it is
split off from enlighten.c.

Signed-off-by: Vitaly Kuznetsov
---
 arch/x86/xen/Makefile        |   1 +
 arch/x86/xen/enlighten.c     | 209 +-----------------------------------------
 arch/x86/xen/enlighten_hvm.c | 210 +++++++++++++++++++++++++++++++++++++++++++
 arch/x86/xen/xen-ops.h       |   6 ++
 4 files changed, 221 insertions(+), 205 deletions(-)
 create mode 100644 arch/x86/xen/enlighten_hvm.c

diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 348128b..1bca75b 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -15,6 +15,7 @@ obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			grant-table.o suspend.o platform-pci-unplug.o \
 			p2m.o apic.o pmu.o
 
+obj-$(CONFIG_XEN_PVHVM)		+= enlighten_hvm.o
 obj-$(CONFIG_XEN_PVH)		+= enlighten_pvh.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d66debd..0cd99ad 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -45,10 +45,8 @@
 #include
 #include
 #include
-#include
 #include
 #include
-#include
 #include
 #include
@@ -140,10 +138,8 @@ void *xen_initial_gdt;
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
-static int xen_cpu_up_prepare_hvm(unsigned int cpu);
 static int xen_cpu_up_online(unsigned int cpu);
 static int xen_cpu_dead_pv(unsigned int cpu);
-static int xen_cpu_dead_hvm(unsigned int cpu);
 
 /*
  * Point at some empty memory to start with. We map the real shared_info
@@ -1282,7 +1278,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.end_context_switch = xen_end_context_switch,
 };
 
-static void xen_reboot(int reason)
+void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
 	int cpu;
@@ -1299,7 +1295,7 @@ static void xen_restart(char *msg)
 	xen_reboot(SHUTDOWN_reboot);
 }
 
-static void xen_emergency_restart(void)
+void xen_emergency_restart(void)
 {
 	xen_reboot(SHUTDOWN_reboot);
 }
@@ -1435,8 +1431,8 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
-static int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
-			   int (*cpu_dead_cb)(unsigned int))
+int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
+		    int (*cpu_dead_cb)(unsigned int))
 {
 	int rc;
 
@@ -1660,79 +1656,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #endif
 }
 
-void __ref xen_hvm_init_shared_info(void)
-{
-	int cpu;
-	struct xen_add_to_physmap xatp;
-	static struct shared_info *shared_info_page = 0;
-
-	if (!shared_info_page)
-		shared_info_page = (struct shared_info *)
-			extend_brk(PAGE_SIZE, PAGE_SIZE);
-	xatp.domid = DOMID_SELF;
-	xatp.idx = 0;
-	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
-	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
-		BUG();
-
-	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
-
-	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
-	 * page, we use it in the event channel upcall and in some pvclock
-	 * related functions. We don't need the vcpu_info placement
-	 * optimizations because we don't use any pv_mmu or pv_irq op on
-	 * HVM.
-	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
-	 * online but xen_hvm_init_shared_info is run at resume time too and
-	 * in that case multiple vcpus might be online. */
-	for_each_online_cpu(cpu) {
-		/* Leave it to be NULL. */
-		if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
-			continue;
-		per_cpu(xen_vcpu, cpu) =
-			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
-	}
-}
-
-#ifdef CONFIG_XEN_PVHVM
-static void __init init_hvm_pv_info(void)
-{
-	int major, minor;
-	uint32_t eax, ebx, ecx, edx, base;
-
-	base = xen_cpuid_base();
-	eax = cpuid_eax(base + 1);
-
-	major = eax >> 16;
-	minor = eax & 0xffff;
-	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
-
-	xen_domain_type = XEN_HVM_DOMAIN;
-
-	/* PVH set up hypercall page in xen_prepare_pvh(). */
-	if (xen_pvh_domain())
-		pv_info.name = "Xen PVH";
-	else {
-		u64 pfn;
-		uint32_t msr;
-
-		pv_info.name = "Xen HVM";
-		msr = cpuid_ebx(base + 2);
-		pfn = __pa(hypercall_page);
-		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-	}
-
-	xen_setup_features();
-
-	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
-	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
-		this_cpu_write(xen_vcpu_id, ebx);
-	else
-		this_cpu_write(xen_vcpu_id, smp_processor_id());
-}
-#endif
-
 static int xen_cpu_up_prepare_pv(unsigned int cpu)
 {
 	int rc;
@@ -1748,37 +1671,6 @@ static int xen_cpu_up_prepare_pv(unsigned int cpu)
 	return 0;
 }
 
-static int xen_cpu_up_prepare_hvm(unsigned int cpu)
-{
-	int rc;
-
-	/*
-	 * This can happen if CPU was offlined earlier and
-	 * offlining timed out in common_cpu_die().
-	 */
-	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-	}
-
-	if (cpu_acpi_id(cpu) != U32_MAX)
-		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
-	else
-		per_cpu(xen_vcpu_id, cpu) = cpu;
-	xen_vcpu_setup(cpu);
-
-	if (xen_feature(XENFEAT_hvm_safe_pvclock))
-		xen_setup_timer(cpu);
-
-	rc = xen_smp_intr_init(cpu);
-	if (rc) {
-		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
-		     cpu, rc);
-		return rc;
-	}
-	return 0;
-}
-
 static int xen_cpu_dead_pv(unsigned int cpu)
 {
 	xen_smp_intr_free(cpu);
@@ -1788,75 +1680,12 @@ static int xen_cpu_dead_pv(unsigned int cpu)
 	return 0;
 }
 
-static int xen_cpu_dead_hvm(unsigned int cpu)
-{
-	xen_smp_intr_free(cpu);
-
-	if (xen_feature(XENFEAT_hvm_safe_pvclock))
-		xen_teardown_timer(cpu);
-
-	return 0;
-}
-
 static int xen_cpu_up_online(unsigned int cpu)
 {
 	xen_init_lock_cpu(cpu);
 	return 0;
 }
 
-#ifdef CONFIG_XEN_PVHVM
-#ifdef CONFIG_KEXEC_CORE
-static void xen_hvm_shutdown(void)
-{
-	native_machine_shutdown();
-	if (kexec_in_progress)
-		xen_reboot(SHUTDOWN_soft_reset);
-}
-
-static void xen_hvm_crash_shutdown(struct pt_regs *regs)
-{
-	native_machine_crash_shutdown(regs);
-	xen_reboot(SHUTDOWN_soft_reset);
-}
-#endif
-
-static void __init xen_hvm_guest_init(void)
-{
-	if (xen_pv_domain())
-		return;
-
-	init_hvm_pv_info();
-
-	xen_hvm_init_shared_info();
-
-	xen_panic_handler_init();
-
-	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
-
-	xen_hvm_smp_init();
-	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
-	xen_unplug_emulated_devices();
-	x86_init.irqs.intr_init = xen_init_IRQ;
-	xen_hvm_init_time_ops();
-	xen_hvm_init_mmu_ops();
-
-	if (xen_pvh_domain())
-		machine_ops.emergency_restart = xen_emergency_restart;
-#ifdef CONFIG_KEXEC_CORE
-	machine_ops.shutdown = xen_hvm_shutdown;
-	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
-#endif
-}
-#endif
-
-static bool xen_nopv = false;
-static __init int xen_parse_nopv(char *arg)
-{
-	xen_nopv = true;
-	return 0;
-}
-early_param("xen_nopv", xen_parse_nopv);
-
 static uint32_t __init xen_platform_pv(void)
 {
 	if (xen_pv_domain())
@@ -1865,28 +1694,6 @@ static uint32_t __init xen_platform_pv(void)
 	return 0;
 }
 
-static uint32_t __init xen_platform_hvm(void)
-{
-	if (xen_pv_domain() || xen_nopv)
-		return 0;
-
-	return xen_cpuid_base();
-}
-
-bool xen_hvm_need_lapic(void)
-{
-	if (xen_nopv)
-		return false;
-	if (xen_pv_domain())
-		return false;
-	if (!xen_hvm_domain())
-		return false;
-	if (xen_feature(XENFEAT_hvm_pirqs))
-		return false;
-	return true;
-}
-EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-
 static void xen_set_cpu_features(struct cpuinfo_x86 *c)
 {
 	clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
@@ -1940,14 +1747,6 @@ const struct hypervisor_x86 x86_hyper_xen_pv = {
 };
 EXPORT_SYMBOL(x86_hyper_xen_pv);
 
-const struct hypervisor_x86 x86_hyper_xen_hvm = {
-	.name                   = "Xen HVM",
-	.detect                 = xen_platform_hvm,
-	.init_platform          = xen_hvm_guest_init,
-	.x2apic_available       = xen_x2apic_para_available,
-};
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-
 #ifdef CONFIG_HOTPLUG_CPU
 void xen_arch_register_cpu(int num)
 {
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
new file mode 100644
index 0000000..a8f4d7e
--- /dev/null
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -0,0 +1,210 @@
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include "xen-ops.h"
+#include "mmu.h"
+#include "smp.h"
+
+void __ref xen_hvm_init_shared_info(void)
+{
+	int cpu;
+	struct xen_add_to_physmap xatp;
+	static struct shared_info *shared_info_page;
+
+	if (!shared_info_page)
+		shared_info_page = (struct shared_info *)
+			extend_brk(PAGE_SIZE, PAGE_SIZE);
+	xatp.domid = DOMID_SELF;
+	xatp.idx = 0;
+	xatp.space = XENMAPSPACE_shared_info;
+	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
+		BUG();
+
+	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
+
+	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
+	 * page, we use it in the event channel upcall and in some pvclock
+	 * related functions. We don't need the vcpu_info placement
+	 * optimizations because we don't use any pv_mmu or pv_irq op on
+	 * HVM.
+	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+	 * online but xen_hvm_init_shared_info is run at resume time too and
+	 * in that case multiple vcpus might be online. */
+	for_each_online_cpu(cpu) {
+		/* Leave it to be NULL. */
+		if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
+			continue;
+		per_cpu(xen_vcpu, cpu) =
+			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
+	}
+}
+
+static void __init init_hvm_pv_info(void)
+{
+	int major, minor;
+	uint32_t eax, ebx, ecx, edx, base;
+
+	base = xen_cpuid_base();
+	eax = cpuid_eax(base + 1);
+
+	major = eax >> 16;
+	minor = eax & 0xffff;
+	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
+	xen_domain_type = XEN_HVM_DOMAIN;
+
+	/* PVH set up hypercall page in xen_prepare_pvh(). */
+	if (xen_pvh_domain())
+		pv_info.name = "Xen PVH";
+	else {
+		u64 pfn;
+		uint32_t msr;
+
+		pv_info.name = "Xen HVM";
+		msr = cpuid_ebx(base + 2);
+		pfn = __pa(hypercall_page);
+		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+	}
+
+	xen_setup_features();
+
+	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
+	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
+		this_cpu_write(xen_vcpu_id, ebx);
+	else
+		this_cpu_write(xen_vcpu_id, smp_processor_id());
+}
+
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+	native_machine_shutdown();
+	if (kexec_in_progress)
+		xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+	native_machine_crash_shutdown(regs);
+	xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
+static int xen_cpu_up_prepare_hvm(unsigned int cpu)
+{
+	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
+	if (cpu_acpi_id(cpu) != U32_MAX)
+		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
+	else
+		per_cpu(xen_vcpu_id, cpu) = cpu;
+	xen_vcpu_setup(cpu);
+
+	if (xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_setup_timer(cpu);
+
+	rc = xen_smp_intr_init(cpu);
+	if (rc) {
+		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+		     cpu, rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int xen_cpu_dead_hvm(unsigned int cpu)
+{
+	xen_smp_intr_free(cpu);
+
+	if (xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_teardown_timer(cpu);
+
+	return 0;
+}
+
+static void __init xen_hvm_guest_init(void)
+{
+	if (xen_pv_domain())
+		return;
+
+	init_hvm_pv_info();
+
+	xen_hvm_init_shared_info();
+
+	xen_panic_handler_init();
+
+	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+
+	xen_hvm_smp_init();
+	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
+	xen_unplug_emulated_devices();
+	x86_init.irqs.intr_init = xen_init_IRQ;
+	xen_hvm_init_time_ops();
+	xen_hvm_init_mmu_ops();
+
+	if (xen_pvh_domain())
+		machine_ops.emergency_restart = xen_emergency_restart;
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.shutdown = xen_hvm_shutdown;
+	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
+}
+
+static bool xen_nopv;
+static __init int xen_parse_nopv(char *arg)
+{
+	xen_nopv = true;
+	return 0;
+}
+early_param("xen_nopv", xen_parse_nopv);
+
+bool xen_hvm_need_lapic(void)
+{
+	if (xen_nopv)
+		return false;
+	if (xen_pv_domain())
+		return false;
+	if (!xen_hvm_domain())
+		return false;
+	if (xen_feature(XENFEAT_hvm_pirqs))
+		return false;
+	return true;
+}
+EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+static uint32_t __init xen_platform_hvm(void)
+{
+	if (xen_pv_domain() || xen_nopv)
+		return 0;
+
+	return xen_cpuid_base();
+}
+
+const struct hypervisor_x86 x86_hyper_xen_hvm = {
+	.name                   = "Xen HVM",
+	.detect                 = xen_platform_hvm,
+	.init_platform          = xen_hvm_guest_init,
+	.x2apic_available       = xen_x2apic_para_available,
+};
+EXPORT_SYMBOL(x86_hyper_xen_hvm);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 2b162f6..7baeb04 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -148,4 +148,10 @@ __visible void xen_adjust_exception_frame(void);
 
 extern int xen_panic_handler_init(void);
 
+extern int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
+			   int (*cpu_dead_cb)(unsigned int));
+
+extern void xen_reboot(int reason);
+extern void xen_emergency_restart(void);
+
 #endif /* XEN_OPS_H */
-- 
2.9.3
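
The sharing pattern the patch relies on, with xen_cpuhp_setup() made non-static and declared in
xen-ops.h so that both enlighten.c and the new enlighten_hvm.c can pass their own prepare/dead
callbacks to it, can be summarized as a small standalone C sketch. All names below are
illustrative stand-ins, not the real kernel interfaces, and the helper body is simplified for
the sake of the example:

/*
 * Standalone sketch (not kernel code) of the callback-sharing pattern:
 * a single setup helper, analogous to xen_cpuhp_setup(), is handed the
 * CPU-hotplug prepare/dead callbacks by each caller.
 */
#include <stdio.h>

/* shared helper, analogous to the now non-static xen_cpuhp_setup() */
static int cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
                       int (*cpu_dead_cb)(unsigned int))
{
	/* The real helper registers the callbacks with the CPU-hotplug
	 * state machine; here we just invoke them once for CPU 0. */
	int rc = cpu_up_prepare_cb(0);

	if (rc)
		return rc;
	return cpu_dead_cb(0);
}

/* HVM-side callbacks, analogous to xen_cpu_up_prepare_hvm() and
 * xen_cpu_dead_hvm() in the new enlighten_hvm.c */
static int hvm_cpu_up_prepare(unsigned int cpu)
{
	printf("hvm: prepare cpu %u\n", cpu);
	return 0;
}

static int hvm_cpu_dead(unsigned int cpu)
{
	printf("hvm: dead cpu %u\n", cpu);
	return 0;
}

int main(void)
{
	/* mirrors WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm,
	 *                                 xen_cpu_dead_hvm)) in
	 * xen_hvm_guest_init() */
	return cpuhp_setup(hvm_cpu_up_prepare, hvm_cpu_dead);
}

Because the callbacks are passed as function pointers, only the declarations in xen-ops.h and
the dropped static qualifiers are needed for the split; the helper body itself stays in
enlighten.c until the common code is separated out.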