From: Vitaly Kuznetsov
To: xen-devel@lists.xenproject.org
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, Boris Ostrovsky,
    Juergen Gross, Andrew Jones
Subject: [PATCH v3 11/21] x86/xen: split off smp_pv.c
Date: Tue, 14 Mar 2017 18:35:46 +0100
Message-Id: <20170314173556.2249-12-vkuznets@redhat.com>
In-Reply-To: <20170314173556.2249-1-vkuznets@redhat.com>
References: <20170314173556.2249-1-vkuznets@redhat.com>

Basically, smp.c is renamed to smp_pv.c and some code is moved out to the
common smp.c. The struct xen_common_irq declaration ended up in smp.h.

Reviewed-by: Juergen Gross
Signed-off-by: Vitaly Kuznetsov
---
 arch/x86/xen/Makefile |   2 +-
 arch/x86/xen/smp.c    | 488 +-----------------------------------------------
 arch/x86/xen/smp.h    |   5 +
 arch/x86/xen/smp_pv.c | 500 ++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 509 insertions(+), 486 deletions(-)
 create mode 100644 arch/x86/xen/smp_pv.c

diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index bc7df8c..ebf3522 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_XEN_PVH)		+= enlighten_pvh.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
-obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= smp.o smp_pv.o
 obj-$(CONFIG_XEN_PVHVM_SMP)	+= smp_hvm.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 5939592..82ac611 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -1,63 +1,21 @@
-/*
- * Xen SMP support
- *
- * This file implements the Xen versions of smp_ops. SMP under Xen is
- * very straightforward. Bringing a CPU up is simply a matter of
- * loading its initial context and setting it running.
- *
- * IPIs are handled through the Xen event mechanism.
- *
- * Because virtual CPUs can be scheduled onto any real CPU, there's no
- * useful topology information for the kernel to make use of. As a
- * result, all CPUs are treated as if they're single-core and
- * single-threaded.
- */
-#include 
-#include 
-#include 
 #include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-#include 
-
-#include 
-#include 
+#include 
+#include 
+#include 
-#include 
-#include 
 #include 
 #include 
 
 #include "xen-ops.h"
-#include "mmu.h"
 #include "smp.h"
-#include "pmu.h"
-
-cpumask_var_t xen_cpu_initialized_map;
-struct xen_common_irq {
-	int irq;
-	char *name;
-};
 static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back.
@@ -70,42 +28,6 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void cpu_bringup(void)
-{
-	int cpu;
-
-	cpu_init();
-	touch_softlockup_watchdog();
-	preempt_disable();
-
-	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
-	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
-		xen_enable_sysenter();
-		xen_enable_syscall();
-	}
-	cpu = smp_processor_id();
-	smp_store_cpu_info(cpu);
-	cpu_data(cpu).x86_max_cores = 1;
-	set_cpu_sibling_map(cpu);
-
-	xen_setup_cpu_clockevents();
-
-	notify_cpu_starting(cpu);
-
-	set_cpu_online(cpu, true);
-
-	cpu_set_state_online(cpu); /* Implies full memory barrier. */
-
-	/* We can take interrupts now: we're officially "up". */
-	local_irq_enable();
-}
-
-asmlinkage __visible void cpu_bringup_and_idle(void)
-{
-	cpu_bringup();
-	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
 void xen_smp_intr_free(unsigned int cpu)
 {
 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
@@ -135,23 +57,6 @@ void xen_smp_intr_free(unsigned int cpu)
 	}
 }
 
-void xen_smp_intr_free_pv(unsigned int cpu)
-{
-	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
-		per_cpu(xen_irq_work, cpu).irq = -1;
-		kfree(per_cpu(xen_irq_work, cpu).name);
-		per_cpu(xen_irq_work, cpu).name = NULL;
-	}
-
-	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
-		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
-		per_cpu(xen_pmu_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_pmu_irq, cpu).name);
-		per_cpu(xen_pmu_irq, cpu).name = NULL;
-	}
-}
-
 int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
@@ -209,360 +114,6 @@ int xen_smp_intr_init(unsigned int cpu)
 	return rc;
 }
 
-int xen_smp_intr_init_pv(unsigned int cpu)
-{
-	int rc;
-	char *callfunc_name, *pmu_name;
-
-	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
-	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
-				    cpu,
-				    xen_irq_work_interrupt,
-				    IRQF_PERCPU|IRQF_NOBALANCING,
-				    callfunc_name,
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(xen_irq_work, cpu).irq = rc;
-	per_cpu(xen_irq_work, cpu).name = callfunc_name;
-
-	if (is_xen_pmu(cpu)) {
-		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
-		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
-					     xen_pmu_irq_handler,
-					     IRQF_PERCPU|IRQF_NOBALANCING,
-					     pmu_name, NULL);
-		if (rc < 0)
-			goto fail;
-		per_cpu(xen_pmu_irq, cpu).irq = rc;
-		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
-	}
-
-	return 0;
-
- fail:
-	xen_smp_intr_free_pv(cpu);
-	return rc;
-}
-
-static void __init xen_fill_possible_map(void)
-{
-	int i, rc;
-
-	if (xen_initial_domain())
-		return;
-
-	for (i = 0; i < nr_cpu_ids; i++) {
-		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0) {
-			num_processors++;
-			set_cpu_possible(i, true);
-		}
-	}
-}
-
-static void __init xen_filter_cpu_maps(void)
-{
-	int i, rc;
-	unsigned int subtract = 0;
-
-	if (!xen_initial_domain())
-		return;
-
-	num_processors = 0;
-	disabled_cpus = 0;
-	for (i = 0; i < nr_cpu_ids; i++) {
-		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0) {
-			num_processors++;
-			set_cpu_possible(i, true);
-		} else {
-			set_cpu_possible(i, false);
-			set_cpu_present(i, false);
-			subtract++;
-		}
-	}
-#ifdef CONFIG_HOTPLUG_CPU
-	/* This is akin to using 'nr_cpus' on the Linux command line.
-	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
-	 * have up to X, while nr_cpu_ids is greater than X. This
-	 * normally is not a problem, except when CPU hotplugging
-	 * is involved and then there might be more than X CPUs
-	 * in the guest - which will not work as there is no
-	 * hypercall to expand the max number of VCPUs an already
-	 * running guest has. So cap it up to X. */
-	if (subtract)
-		nr_cpu_ids = nr_cpu_ids - subtract;
-#endif
-
-}
-
-static void __init xen_pv_smp_prepare_boot_cpu(void)
-{
-	BUG_ON(smp_processor_id() != 0);
-	native_smp_prepare_boot_cpu();
-
-	if (!xen_feature(XENFEAT_writable_page_tables))
-		/* We've switched to the "real" per-cpu gdt, so make
-		 * sure the old memory can be recycled. */
-		make_lowmem_page_readwrite(xen_initial_gdt);
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-	 * expects __USER_DS
-	 */
-	loadsegment(ds, __USER_DS);
-	loadsegment(es, __USER_DS);
-#endif
-
-	xen_filter_cpu_maps();
-	xen_setup_vcpu_info_placement();
-
-	/*
-	 * The alternative logic (which patches the unlock/lock) runs before
-	 * the smp bootup up code is activated. Hence we need to set this up
-	 * the core kernel is being patched. Otherwise we will have only
-	 * modules patched but not core code.
-	 */
-	xen_init_spinlocks();
-}
-
-static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
-{
-	unsigned cpu;
-	unsigned int i;
-
-	if (skip_ioapic_setup) {
-		char *m = (max_cpus == 0) ?
-			"The nosmp parameter is incompatible with Xen; " \
-			"use Xen dom0_max_vcpus=1 parameter" :
-			"The noapic parameter is incompatible with Xen";
-
-		xen_raw_printk(m);
-		panic(m);
-	}
-	xen_init_lock_cpu(0);
-
-	smp_store_boot_cpu_info();
-	cpu_data(0).x86_max_cores = 1;
-
-	for_each_possible_cpu(i) {
-		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
-	}
-	set_cpu_sibling_map(0);
-
-	xen_pmu_init(0);
-
-	if (xen_smp_intr_init(0))
-		BUG();
-
-	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
-		panic("could not allocate xen_cpu_initialized_map\n");
-
-	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
-
-	/* Restrict the possible_map according to max_cpus. */
-	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
-			continue;
-		set_cpu_possible(cpu, false);
-	}
-
-	for_each_possible_cpu(cpu)
-		set_cpu_present(cpu, true);
-}
-
-static int
-cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
-{
-	struct vcpu_guest_context *ctxt;
-	struct desc_struct *gdt;
-	unsigned long gdt_mfn;
-
-	/* used to tell cpu_init() that it can proceed with initialization */
-	cpumask_set_cpu(cpu, cpu_callout_mask);
-	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
-		return 0;
-
-	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
-	if (ctxt == NULL)
-		return -ENOMEM;
-
-	gdt = get_cpu_gdt_table(cpu);
-
-#ifdef CONFIG_X86_32
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
-	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
-	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-	ctxt->flags = VGCF_IN_KERNEL;
-	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
-	ctxt->user_regs.ds = __USER_DS;
-	ctxt->user_regs.es = __USER_DS;
-	ctxt->user_regs.ss = __KERNEL_DS;
-
-	xen_copy_trap_info(ctxt->trap_ctxt);
-
-	ctxt->ldt_ents = 0;
-
-	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
-
-	gdt_mfn = arbitrary_virt_to_mfn(gdt);
-	make_lowmem_page_readonly(gdt);
-	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
-
-	ctxt->gdt_frames[0] = gdt_mfn;
-	ctxt->gdt_ents      = GDT_ENTRIES;
-
-	ctxt->kernel_ss = __KERNEL_DS;
-	ctxt->kernel_sp = idle->thread.sp0;
-
-#ifdef CONFIG_X86_32
-	ctxt->event_callback_cs     = __KERNEL_CS;
-	ctxt->failsafe_callback_cs  = __KERNEL_CS;
-#else
-	ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
-	ctxt->event_callback_eip    =
-		(unsigned long)xen_hypervisor_callback;
-	ctxt->failsafe_callback_eip =
-		(unsigned long)xen_failsafe_callback;
-	ctxt->user_regs.cs = __KERNEL_CS;
-	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
-
-	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
-	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
-	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
-		BUG();
-
-	kfree(ctxt);
-	return 0;
-}
-
-static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
-{
-	int rc;
-
-	common_cpu_up(cpu, idle);
-
-	xen_setup_runstate_info(cpu);
-
-	/*
-	 * PV VCPUs are always successfully taken down (see 'while' loop
-	 * in xen_cpu_die()), so -EBUSY is an error.
-	 */
-	rc = cpu_check_up_prepare(cpu);
-	if (rc)
-		return rc;
-
-	/* make sure interrupts start blocked */
-	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
-
-	rc = cpu_initialize_context(cpu, idle);
-	if (rc)
-		return rc;
-
-	xen_pmu_init(cpu);
-
-	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
-	BUG_ON(rc);
-
-	while (cpu_report_state(cpu) != CPU_ONLINE)
-		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-
-	return 0;
-}
-
-static void xen_smp_cpus_done(unsigned int max_cpus)
-{
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int xen_cpu_disable(void)
-{
-	unsigned int cpu = smp_processor_id();
-	if (cpu == 0)
-		return -EBUSY;
-
-	cpu_disable_common();
-
-	load_cr3(swapper_pg_dir);
-	return 0;
-}
-
-static void xen_pv_cpu_die(unsigned int cpu)
-{
-	while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
-				  xen_vcpu_nr(cpu), NULL)) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(HZ/10);
-	}
-
-	if (common_cpu_die(cpu) == 0) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-		xen_teardown_timer(cpu);
-		xen_pmu_finish(cpu);
-	}
-}
-
-static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
-{
-	play_dead_common();
-	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
-	cpu_bringup();
-	/*
-	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
-	 * clears certain data that the cpu_idle loop (which called us
-	 * and that we return from) expects. The only way to get that
-	 * data back is to call:
-	 */
-	tick_nohz_idle_enter();
-
-	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-static int xen_cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-static void xen_pv_cpu_die(unsigned int cpu)
-{
-	BUG();
-}
-
-static void xen_play_dead(void)
-{
-	BUG();
-}
-
-#endif
-static void stop_self(void *v)
-{
-	int cpu = smp_processor_id();
-
-	/* make sure we're not pinning something down */
-	load_cr3(swapper_pg_dir);
-	/* should set up a minimal gdt */
-
-	set_cpu_online(cpu, false);
-
-	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
-	BUG();
-}
-
-static void xen_stop_other_cpus(int wait)
-{
-	smp_call_function(stop_self, NULL, wait);
-}
-
 void xen_smp_send_reschedule(int cpu)
 {
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
@@ -697,36 +248,3 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 
 	return IRQ_HANDLED;
 }
-
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
-{
-	irq_enter();
-	irq_work_run();
-	inc_irq_stat(apic_irq_work_irqs);
-	irq_exit();
-
-	return IRQ_HANDLED;
-}
-
-static const struct smp_ops xen_smp_ops __initconst = {
-	.smp_prepare_boot_cpu	= xen_pv_smp_prepare_boot_cpu,
-	.smp_prepare_cpus	= xen_smp_prepare_cpus,
-	.smp_cpus_done		= xen_smp_cpus_done,
-
-	.cpu_up			= xen_cpu_up,
-	.cpu_die		= xen_pv_cpu_die,
-	.cpu_disable		= xen_cpu_disable,
-	.play_dead		= xen_play_dead,
-
-	.stop_other_cpus	= xen_stop_other_cpus,
-	.smp_send_reschedule	= xen_smp_send_reschedule,
-
-	.send_call_func_ipi	= xen_smp_send_call_function_ipi,
-	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
-};
-
-void __init xen_smp_init(void)
-{
-	smp_ops = xen_smp_ops;
-	xen_fill_possible_map();
-}
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 2e8cd56..8ebb6ac 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -17,6 +17,11 @@ void xen_smp_intr_free_pv(unsigned int cpu);
 void xen_smp_send_reschedule(int cpu);
 void xen_smp_send_call_function_ipi(const struct cpumask *mask);
 void xen_smp_send_call_function_single_ipi(int cpu);
+
+struct xen_common_irq {
+	int irq;
+	char *name;
+};
 #else /* CONFIG_SMP */
 
 static inline int xen_smp_intr_init(unsigned int cpu)
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
new file mode 100644
index 0000000..3a61e0e
--- /dev/null
+++ b/arch/x86/xen/smp_pv.c
@@ -0,0 +1,500 @@
+/*
+ * Xen SMP support
+ *
+ * This file implements the Xen versions of smp_ops. SMP under Xen is
+ * very straightforward. Bringing a CPU up is simply a matter of
+ * loading its initial context and setting it running.
+ *
+ * IPIs are handled through the Xen event mechanism.
+ *
+ * Because virtual CPUs can be scheduled onto any real CPU, there's no
+ * useful topology information for the kernel to make use of. As a
+ * result, all CPUs are treated as if they're single-core and
+ * single-threaded.
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include "xen-ops.h" +#include "mmu.h" +#include "smp.h" +#include "pmu.h" + +cpumask_var_t xen_cpu_initialized_map; + +static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 }; +static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 }; + +static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id); + +static void cpu_bringup(void) +{ + int cpu; + + cpu_init(); + touch_softlockup_watchdog(); + preempt_disable(); + + /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */ + if (!xen_feature(XENFEAT_supervisor_mode_kernel)) { + xen_enable_sysenter(); + xen_enable_syscall(); + } + cpu = smp_processor_id(); + smp_store_cpu_info(cpu); + cpu_data(cpu).x86_max_cores = 1; + set_cpu_sibling_map(cpu); + + xen_setup_cpu_clockevents(); + + notify_cpu_starting(cpu); + + set_cpu_online(cpu, true); + + cpu_set_state_online(cpu); /* Implies full memory barrier. */ + + /* We can take interrupts now: we're officially "up". */ + local_irq_enable(); +} + +asmlinkage __visible void cpu_bringup_and_idle(void) +{ + cpu_bringup(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void xen_smp_intr_free_pv(unsigned int cpu) +{ + if (per_cpu(xen_irq_work, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); + per_cpu(xen_irq_work, cpu).irq = -1; + kfree(per_cpu(xen_irq_work, cpu).name); + per_cpu(xen_irq_work, cpu).name = NULL; + } + + if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); + per_cpu(xen_pmu_irq, cpu).irq = -1; + kfree(per_cpu(xen_pmu_irq, cpu).name); + per_cpu(xen_pmu_irq, cpu).name = NULL; + } +} + +int xen_smp_intr_init_pv(unsigned int cpu) +{ + int rc; + char *callfunc_name, *pmu_name; + + callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); + rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, + cpu, + xen_irq_work_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(xen_irq_work, cpu).irq = rc; + per_cpu(xen_irq_work, cpu).name = callfunc_name; + + if (is_xen_pmu(cpu)) { + pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); + rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, + xen_pmu_irq_handler, + IRQF_PERCPU|IRQF_NOBALANCING, + pmu_name, NULL); + if (rc < 0) + goto fail; + per_cpu(xen_pmu_irq, cpu).irq = rc; + per_cpu(xen_pmu_irq, cpu).name = pmu_name; + } + + return 0; + + fail: + xen_smp_intr_free_pv(cpu); + return rc; +} + +static void __init xen_fill_possible_map(void) +{ + int i, rc; + + if (xen_initial_domain()) + return; + + for (i = 0; i < nr_cpu_ids; i++) { + rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); + if (rc >= 0) { + num_processors++; + set_cpu_possible(i, true); + } + } +} + +static void __init xen_filter_cpu_maps(void) +{ + int i, rc; + unsigned int subtract = 0; + + if (!xen_initial_domain()) + return; + + num_processors = 0; + disabled_cpus = 0; + for (i = 0; i < nr_cpu_ids; i++) { + rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); + if (rc >= 0) { + num_processors++; + set_cpu_possible(i, true); + } else { + set_cpu_possible(i, false); + set_cpu_present(i, false); + subtract++; + } + } +#ifdef CONFIG_HOTPLUG_CPU + /* This is akin to using 'nr_cpus' on the Linux command line. 
+	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+	 * have up to X, while nr_cpu_ids is greater than X. This
+	 * normally is not a problem, except when CPU hotplugging
+	 * is involved and then there might be more than X CPUs
+	 * in the guest - which will not work as there is no
+	 * hypercall to expand the max number of VCPUs an already
+	 * running guest has. So cap it up to X. */
+	if (subtract)
+		nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
+}
+
+static void __init xen_pv_smp_prepare_boot_cpu(void)
+{
+	BUG_ON(smp_processor_id() != 0);
+	native_smp_prepare_boot_cpu();
+
+	if (!xen_feature(XENFEAT_writable_page_tables))
+		/* We've switched to the "real" per-cpu gdt, so make
+		 * sure the old memory can be recycled. */
+		make_lowmem_page_readwrite(xen_initial_gdt);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+	 * expects __USER_DS
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
+
+	xen_filter_cpu_maps();
+	xen_setup_vcpu_info_placement();
+
+	/*
+	 * The alternative logic (which patches the unlock/lock) runs before
+	 * the smp bootup up code is activated. Hence we need to set this up
+	 * the core kernel is being patched. Otherwise we will have only
+	 * modules patched but not core code.
+	 */
+	xen_init_spinlocks();
+}
+
+static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned cpu;
+	unsigned int i;
+
+	if (skip_ioapic_setup) {
+		char *m = (max_cpus == 0) ?
+			"The nosmp parameter is incompatible with Xen; " \
+			"use Xen dom0_max_vcpus=1 parameter" :
+			"The noapic parameter is incompatible with Xen";
+
+		xen_raw_printk(m);
+		panic(m);
+	}
+	xen_init_lock_cpu(0);
+
+	smp_store_boot_cpu_info();
+	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+	}
+	set_cpu_sibling_map(0);
+
+	xen_pmu_init(0);
+
+	if (xen_smp_intr_init(0))
+		BUG();
+
+	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+		panic("could not allocate xen_cpu_initialized_map\n");
+
+	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
+
+	/* Restrict the possible_map according to max_cpus. */
+	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
+			continue;
+		set_cpu_possible(cpu, false);
+	}
+
+	for_each_possible_cpu(cpu)
+		set_cpu_present(cpu, true);
+}
+
+static int
+cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+{
+	struct vcpu_guest_context *ctxt;
+	struct desc_struct *gdt;
+	unsigned long gdt_mfn;
+
+	/* used to tell cpu_init() that it can proceed with initialization */
+	cpumask_set_cpu(cpu, cpu_callout_mask);
+	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
+		return 0;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (ctxt == NULL)
+		return -ENOMEM;
+
+	gdt = get_cpu_gdt_table(cpu);
+
+#ifdef CONFIG_X86_32
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+#endif
+	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+
+	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->flags = VGCF_IN_KERNEL;
+	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+	ctxt->user_regs.ds = __USER_DS;
+	ctxt->user_regs.es = __USER_DS;
+	ctxt->user_regs.ss = __KERNEL_DS;
+
+	xen_copy_trap_info(ctxt->trap_ctxt);
+
+	ctxt->ldt_ents = 0;
+
+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+	gdt_mfn = arbitrary_virt_to_mfn(gdt);
+	make_lowmem_page_readonly(gdt);
+	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+
+	ctxt->gdt_frames[0] = gdt_mfn;
+	ctxt->gdt_ents      = GDT_ENTRIES;
+
+	ctxt->kernel_ss = __KERNEL_DS;
+	ctxt->kernel_sp = idle->thread.sp0;
+
+#ifdef CONFIG_X86_32
+	ctxt->event_callback_cs     = __KERNEL_CS;
+	ctxt->failsafe_callback_cs  = __KERNEL_CS;
+#else
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
+#endif
+	ctxt->event_callback_eip    =
+		(unsigned long)xen_hypervisor_callback;
+	ctxt->failsafe_callback_eip =
+		(unsigned long)xen_failsafe_callback;
+	ctxt->user_regs.cs = __KERNEL_CS;
+	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+
+	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
+	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
+		BUG();
+
+	kfree(ctxt);
+	return 0;
+}
+
+static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+	int rc;
+
+	common_cpu_up(cpu, idle);
+
+	xen_setup_runstate_info(cpu);
+
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+ */ + rc = cpu_check_up_prepare(cpu); + if (rc) + return rc; + + /* make sure interrupts start blocked */ + per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; + + rc = cpu_initialize_context(cpu, idle); + if (rc) + return rc; + + xen_pmu_init(cpu); + + rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL); + BUG_ON(rc); + + while (cpu_report_state(cpu) != CPU_ONLINE) + HYPERVISOR_sched_op(SCHEDOP_yield, NULL); + + return 0; +} + +static void xen_smp_cpus_done(unsigned int max_cpus) +{ +} + +#ifdef CONFIG_HOTPLUG_CPU +static int xen_cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + if (cpu == 0) + return -EBUSY; + + cpu_disable_common(); + + load_cr3(swapper_pg_dir); + return 0; +} + +static void xen_pv_cpu_die(unsigned int cpu) +{ + while (HYPERVISOR_vcpu_op(VCPUOP_is_up, + xen_vcpu_nr(cpu), NULL)) { + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/10); + } + + if (common_cpu_die(cpu) == 0) { + xen_smp_intr_free(cpu); + xen_uninit_lock_cpu(cpu); + xen_teardown_timer(cpu); + xen_pmu_finish(cpu); + } +} + +static void xen_play_dead(void) /* used only with HOTPLUG_CPU */ +{ + play_dead_common(); + HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL); + cpu_bringup(); + /* + * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) + * clears certain data that the cpu_idle loop (which called us + * and that we return from) expects. The only way to get that + * data back is to call: + */ + tick_nohz_idle_enter(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +#else /* !CONFIG_HOTPLUG_CPU */ +static int xen_cpu_disable(void) +{ + return -ENOSYS; +} + +static void xen_pv_cpu_die(unsigned int cpu) +{ + BUG(); +} + +static void xen_play_dead(void) +{ + BUG(); +} + +#endif +static void stop_self(void *v) +{ + int cpu = smp_processor_id(); + + /* make sure we're not pinning something down */ + load_cr3(swapper_pg_dir); + /* should set up a minimal gdt */ + + set_cpu_online(cpu, false); + + HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL); + BUG(); +} + +static void xen_stop_other_cpus(int wait) +{ + smp_call_function(stop_self, NULL, wait); +} + +static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) +{ + irq_enter(); + generic_smp_call_function_interrupt(); + inc_irq_stat(irq_call_count); + irq_exit(); + + return IRQ_HANDLED; +} + +static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id) +{ + irq_enter(); + irq_work_run(); + inc_irq_stat(apic_irq_work_irqs); + irq_exit(); + + return IRQ_HANDLED; +} + +static const struct smp_ops xen_smp_ops __initconst = { + .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu, + .smp_prepare_cpus = xen_smp_prepare_cpus, + .smp_cpus_done = xen_smp_cpus_done, + + .cpu_up = xen_cpu_up, + .cpu_die = xen_pv_cpu_die, + .cpu_disable = xen_cpu_disable, + .play_dead = xen_play_dead, + + .stop_other_cpus = xen_stop_other_cpus, + .smp_send_reschedule = xen_smp_send_reschedule, + + .send_call_func_ipi = xen_smp_send_call_function_ipi, + .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, +}; + +void __init xen_smp_init(void) +{ + smp_ops = xen_smp_ops; + xen_fill_possible_map(); +} -- 2.9.3