Linus,
please pull the latest smp-urgent-for-linus git tree from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git smp-urgent-for-linus
This is the final cleanup of the hotplug notifier infrastructure. The
series has been reintegrated in the last two days because a new driver
using the old infrastructure came in via the SCSI tree.
- Convert the last leftover drivers still using notifiers (conversion pattern sketched below)
- Fixup for a completely broken hotplug user
- Prevent setup of already used states
- Removal of the notifiers
- Treewide cleanup of hotplug state names
- Consolidation of state space
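For reference, a minimal sketch of the conversion pattern used throughout the
series (illustrative only, not part of this pull; "foo", the state name and
the callbacks are placeholders):

  #include <linux/cpu.h>
  #include <linux/cpuhotplug.h>

  static enum cpuhp_state foo_online_state;

  /* Runs on a CPU when it comes online (replaces the CPU_ONLINE notifier case) */
  static int foo_cpu_online(unsigned int cpu)
  {
          return 0;
  }

  /* Runs on a CPU before it goes down (replaces the offline notifier cases) */
  static int foo_cpu_offline(unsigned int cpu)
  {
          return 0;
  }

  static int __init foo_init(void)
  {
          int ret;

          /* Dynamic states return the allocated state number on success */
          ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
                                  foo_cpu_online, foo_cpu_offline);
          if (ret < 0)
                  return ret;
          foo_online_state = ret;
          return 0;
  }

  static void __exit foo_exit(void)
  {
          cpuhp_remove_state(foo_online_state);
  }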
There is Sphinx-based documentation pending, but it still needs review from
the documentation folks.
Thanks,
tglx
------------------>
Anna-Maria Gleixner (1):
staging/lustre/libcfs: Convert to hotplug state machine
Sebastian Andrzej Siewior (2):
scsi/bnx2fc: Convert to hotplug state machine
scsi/bnx2i: Convert to hotplug state machine
Thomas Gleixner (11):
scsi: qedi: Convert to hotplug state machine
ARM/imx/mmcd: Fix broken cpu hotplug handling
perf/x86/intel/cstate: Prevent hotplug callback leak
bus: arm-ccn: Prevent hotplug callback leak
x86/msr: Remove bogus cleanup from the error path
cpu/hotplug: Prevent overwriting of callbacks
cpu/hotplug: Remove obsolete cpu hotplug register/unregister functions
cpu/hotplug: Cleanup state names
coresight/etm3/4x: Consolidate hotplug state space
irqchip/gic: Consolidate hotplug state space
irqchip/armada-xp: Consolidate hotplug state space
arch/arm/kernel/smp_twd.c | 2 +-
arch/arm/mach-imx/mmdc.c | 34 +--
arch/arm/mach-mvebu/coherency.c | 2 +-
arch/arm/mm/cache-l2x0-pmu.c | 2 +-
arch/arm/mm/cache-l2x0.c | 2 +-
arch/arm/vfp/vfpmodule.c | 2 +-
arch/arm/xen/enlighten.c | 2 +-
arch/arm64/kernel/armv8_deprecated.c | 2 +-
arch/arm64/kernel/debug-monitors.c | 2 +-
arch/arm64/kernel/hw_breakpoint.c | 2 +-
arch/blackfin/kernel/perf_event.c | 2 +-
arch/metag/kernel/perf/perf_event.c | 2 +-
arch/mips/kernel/pm-cps.c | 2 +-
arch/mips/oprofile/op_model_loongson3.c | 2 +-
arch/powerpc/mm/numa.c | 2 +-
arch/powerpc/perf/core-book3s.c | 2 +-
arch/s390/kernel/perf_cpum_cf.c | 2 +-
arch/s390/kernel/perf_cpum_sf.c | 2 +-
arch/x86/entry/vdso/vma.c | 2 +-
arch/x86/events/amd/ibs.c | 2 +-
arch/x86/events/amd/power.c | 2 +-
arch/x86/events/amd/uncore.c | 6 +-
arch/x86/events/core.c | 6 +-
arch/x86/events/intel/cqm.c | 4 +-
arch/x86/events/intel/cstate.c | 14 +-
arch/x86/events/intel/rapl.c | 4 +-
arch/x86/events/intel/uncore.c | 10 +-
arch/x86/kernel/apb_timer.c | 2 +-
arch/x86/kernel/apic/x2apic_cluster.c | 2 +-
arch/x86/kernel/hpet.c | 4 +-
arch/x86/kernel/msr.c | 1 -
arch/x86/kernel/tboot.c | 2 +-
arch/x86/kvm/x86.c | 2 +-
arch/x86/xen/enlighten.c | 4 +-
arch/xtensa/kernel/perf_event.c | 2 +-
drivers/bus/arm-cci.c | 2 +-
drivers/bus/arm-ccn.c | 7 +-
drivers/clocksource/arc_timer.c | 2 +-
drivers/clocksource/arm_arch_timer.c | 2 +-
drivers/clocksource/arm_global_timer.c | 2 +-
drivers/clocksource/dummy_timer.c | 2 +-
drivers/clocksource/exynos_mct.c | 2 +-
drivers/clocksource/jcore-pit.c | 2 +-
drivers/clocksource/metag_generic.c | 2 +-
drivers/clocksource/mips-gic-timer.c | 4 +-
drivers/clocksource/qcom-timer.c | 2 +-
drivers/clocksource/time-armada-370-xp.c | 2 +-
drivers/clocksource/timer-atlas7.c | 2 +-
drivers/hwtracing/coresight/coresight-etm3x.c | 4 +-
drivers/hwtracing/coresight/coresight-etm4x.c | 8 +-
drivers/irqchip/irq-armada-370-xp.c | 6 +-
drivers/irqchip/irq-bcm2836.c | 2 +-
drivers/irqchip/irq-gic-v3.c | 6 +-
drivers/irqchip/irq-gic.c | 2 +-
drivers/irqchip/irq-hip04.c | 2 +-
drivers/leds/trigger/ledtrig-cpu.c | 2 +-
drivers/net/virtio_net.c | 4 +-
drivers/perf/arm_pmu.c | 2 +-
drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 79 +++----
drivers/scsi/bnx2i/bnx2i_init.c | 78 +++----
drivers/scsi/qedi/qedi_main.c | 96 +++------
.../staging/lustre/lnet/libcfs/linux/linux-cpu.c | 85 ++++----
drivers/xen/events/events_fifo.c | 2 +-
include/linux/cpu.h | 90 --------
include/linux/cpuhotplug.h | 9 +-
kernel/cpu.c | 235 +++++----------------
lib/Kconfig.debug | 24 ---
lib/Makefile | 1 -
lib/cpu-notifier-error-inject.c | 84 --------
virt/kvm/arm/arch_timer.c | 2 +-
virt/kvm/arm/vgic/vgic-init.c | 2 +-
virt/kvm/kvm_main.c | 2 +-
72 files changed, 308 insertions(+), 689 deletions(-)
delete mode 100644 lib/cpu-notifier-error-inject.c
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 02d5e5e8d44c..895ae5197159 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -339,7 +339,7 @@ static int __init twd_local_timer_common_register(struct device_node *np)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
- "AP_ARM_TWD_STARTING",
+ "arm/timer/twd:starting",
twd_timer_starting_cpu, twd_timer_dying_cpu);
twd_get_clock(np);
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index ba96bf979625..699157759120 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -60,6 +60,7 @@
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
+static enum cpuhp_state cpuhp_mmdc_state;
static int ddr_type;
struct fsl_mmdc_devtype_data {
@@ -451,8 +452,8 @@ static int imx_mmdc_remove(struct platform_device *pdev)
{
struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
perf_pmu_unregister(&pmu_mmdc->pmu);
- cpuhp_remove_state_nocalls(CPUHP_ONLINE);
kfree(pmu_mmdc);
return 0;
}
@@ -472,6 +473,18 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
return -ENOMEM;
}
+ /* The first instance registers the hotplug state */
+ if (!cpuhp_mmdc_state) {
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/mmdc:online", NULL,
+ mmdc_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("cpuhp_setup_state_multi failed\n");
+ goto pmu_free;
+ }
+ cpuhp_mmdc_state = ret;
+ }
+
mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
if (mmdc_num == 0)
name = "mmdc";
@@ -485,26 +498,23 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
HRTIMER_MODE_REL);
pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
- cpuhp_state_add_instance_nocalls(CPUHP_ONLINE,
- &pmu_mmdc->node);
- cpumask_set_cpu(smp_processor_id(), &pmu_mmdc->cpu);
- ret = cpuhp_setup_state_multi(CPUHP_AP_NOTIFY_ONLINE,
- "MMDC_ONLINE", NULL,
- mmdc_pmu_offline_cpu);
- if (ret) {
- pr_err("cpuhp_setup_state_multi failure\n");
- goto pmu_register_err;
- }
+ cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
+
+ /* Register the pmu instance for cpu hotplug */
+ cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
- platform_set_drvdata(pdev, pmu_mmdc);
if (ret)
goto pmu_register_err;
+
+ platform_set_drvdata(pdev, pmu_mmdc);
return 0;
pmu_register_err:
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
hrtimer_cancel(&pmu_mmdc->hrtimer);
+pmu_free:
kfree(pmu_mmdc);
return ret;
}
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index ae2a018b9305..8f8748a0c84f 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -148,7 +148,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
of_node_put(cpu_config_np);
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
- "AP_ARM_MVEBU_COHERENCY",
+ "arm/mvebu/coherency:starting",
armada_xp_clear_l2_starting, NULL);
exit:
set_cpu_coherent();
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index 976d3057272e..0a1e2280141f 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -563,7 +563,7 @@ static __init int l2x0_pmu_init(void)
cpumask_set_cpu(0, &pmu_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
- "AP_PERF_ARM_L2X0_ONLINE", NULL,
+ "perf/arm/l2x0:online", NULL,
l2x0_pmu_offline_cpu);
if (ret)
goto out_pmu;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d1870c777c6e..2290be390f87 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -683,7 +683,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
- "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+ "arm/l2x0:starting", l2c310_starting_cpu,
l2c310_dying_cpu);
}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 0351f5645fb1..569d5a650a4a 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -799,7 +799,7 @@ static int __init vfp_init(void)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
- "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+ "arm/vfp:starting", vfp_starting_cpu,
vfp_dying_cpu);
vfp_vector = vfp_support_entry;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 4986dc0c1dff..11d9f2898b16 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -412,7 +412,7 @@ static int __init xen_guest_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
- "AP_ARM_XEN_STARTING", xen_starting_cpu,
+ "arm/xen:starting", xen_starting_cpu,
xen_dying_cpu);
}
early_initcall(xen_guest_init);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 04de188a36c9..a211addeb6bc 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -640,7 +640,7 @@ static int __init armv8_deprecated_init(void)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
- "AP_ARM64_ISNDEP_STARTING",
+ "arm64/isndep:starting",
run_all_insn_set_hw_mode, NULL);
register_insn_emulation_sysctl(ctl_abi);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 605df76f0a06..2bd426448fc1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -140,7 +140,7 @@ static int clear_os_lock(unsigned int cpu)
static int debug_monitors_init(void)
{
return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
- "CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING",
+ "arm64/debug_monitors:starting",
clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 1b3c747fedda..0296e7924240 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -1001,7 +1001,7 @@ static int __init arch_hw_breakpoint_init(void)
* debugger will leave the world in a nice state for us.
*/
ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
- "CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING",
+ "perf/arm64/hw_breakpoint:starting",
hw_breakpoint_reset, NULL);
if (ret)
pr_err("failed to register CPU hotplug notifier: %d\n", ret);
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 6355e97d22b9..6a9524ad04a5 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -475,7 +475,7 @@ static int __init bfin_pmu_init(void)
ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
- cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+ cpuhp_setup_state(CPUHP_PERF_BFIN,"perf/bfin:starting",
bfin_pmu_prepare_cpu, NULL);
return ret;
}
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 052cba23708c..7e793eb0c1fe 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -868,7 +868,7 @@ static int __init init_hw_perf_events(void)
metag_out32(0, PERF_COUNT(1));
cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
- "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+ "perf/metag:starting", metag_pmu_starting_cpu,
NULL);
ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 7cf653e21423..5f928c34c148 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -713,7 +713,7 @@ static int __init cps_pm_init(void)
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
- return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 85f3ee4ab456..d60d97b6113d 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -186,7 +186,7 @@ static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- "AP_MIPS_OP_LOONGSON3_STARTING",
+ "mips/oprofile/loongson3:starting",
loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0cb6bd8bfccf..b1099cb2f393 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -944,7 +944,7 @@ void __init initmem_init(void)
* _nocalls() + manual invocation is used because cpuhp is not yet
* initialized for the boot CPU.
*/
- cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+ cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
for_each_present_cpu(cpu)
numa_setup_cpu(cpu);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72c27b8d2cf3..fd3e4034c04d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2189,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
#endif /* CONFIG_PPC64 */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
- cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+ cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
power_pmu_prepare_cpu, NULL);
return 0;
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 037c2a253ae4..1aba10e90906 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -711,7 +711,7 @@ static int __init cpumf_pmu_init(void)
return rc;
}
return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
- "AP_PERF_S390_CF_ONLINE",
+ "perf/s390/cf:online",
s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 763dec18edcd..1c0b58545c04 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1623,7 +1623,7 @@ static int __init init_cpum_sampling_pmu(void)
goto out;
}
- cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+ cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 40121d14d34d..10820f6cefbf 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -371,7 +371,7 @@ static int __init init_vdso(void)
/* notifier priority > KVM */
return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
- "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
+ "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index b26ee32f73e8..05612a2529c8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- "AP_PERF_X86_AMD_IBS_STARTING",
+ "perf/x86/amd/ibs:STARTING",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 9842270ed2f2..a6eee5ac4f58 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -291,7 +291,7 @@ static int __init amd_power_pmu_init(void)
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- "AP_PERF_X86_AMD_POWER_ONLINE",
+ "perf/x86/amd/power:online",
power_cpu_init, power_cpu_exit);
ret = perf_pmu_register(&pmu_class, "power", -1);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 65577f081d07..a0b1bdb3ad42 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -527,16 +527,16 @@ static int __init amd_uncore_init(void)
* Install callbacks. Core will call them for each online cpu.
*/
if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
- "PERF_X86_AMD_UNCORE_PREP",
+ "perf/x86/amd/uncore:prepare",
amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
goto fail_l2;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
- "AP_PERF_X86_AMD_UNCORE_STARTING",
+ "perf/x86/amd/uncore:starting",
amd_uncore_cpu_starting, NULL))
goto fail_prep;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
- "AP_PERF_X86_AMD_UNCORE_ONLINE",
+ "perf/x86/amd/uncore:online",
amd_uncore_cpu_online,
amd_uncore_cpu_down_prepare))
goto fail_start;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f1c22584a46f..019c5887b698 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1820,18 +1820,18 @@ static int __init init_hw_perf_events(void)
* Install callbacks. Core will call them for each online
* cpu.
*/
- err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+ err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
if (err)
return err;
err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
- "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+ "perf/x86:starting", x86_pmu_starting_cpu,
x86_pmu_dying_cpu);
if (err)
goto out;
- err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+ err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
x86_pmu_online_cpu, NULL);
if (err)
goto out1;
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 0c45cc8e64ba..8c00dc09a5d2 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1747,9 +1747,9 @@ static int __init intel_cqm_init(void)
* is enabled to avoid notifier leak.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
- "AP_PERF_X86_CQM_STARTING",
+ "perf/x86/cqm:starting",
intel_cqm_cpu_starting, NULL);
- cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
NULL, intel_cqm_cpu_exit);
out:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index da51e5a3e2ff..fec8a461bdef 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -594,6 +594,9 @@ static int __init cstate_probe(const struct cstate_model *cm)
static inline void cstate_cleanup(void)
{
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
+
if (has_cstate_core)
perf_pmu_unregister(&cstate_core_pmu);
@@ -606,16 +609,16 @@ static int __init cstate_init(void)
int err;
cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
- "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
- NULL);
+ "perf/x86/cstate:starting", cstate_cpu_init, NULL);
cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
+ "perf/x86/cstate:online", NULL, cstate_cpu_exit);
if (has_cstate_core) {
err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
if (err) {
has_cstate_core = false;
pr_info("Failed to register cstate core pmu\n");
+ cstate_cleanup();
return err;
}
}
@@ -629,8 +632,7 @@ static int __init cstate_init(void)
return err;
}
}
-
- return err;
+ return 0;
}
static int __init cstate_pmu_init(void)
@@ -655,8 +657,6 @@ module_init(cstate_pmu_init);
static void __exit cstate_pmu_exit(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
cstate_cleanup();
}
module_exit(cstate_pmu_exit);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 0a535cea8ff3..bd34124449b0 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -803,13 +803,13 @@ static int __init rapl_pmu_init(void)
* Install callbacks. Core will call them for each online cpu.
*/
- ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+ ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
rapl_cpu_prepare, NULL);
if (ret)
goto out;
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
- "AP_PERF_X86_RAPL_ONLINE",
+ "perf/x86/rapl:online",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
goto out1;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index dbaaf7dc8373..97c246f84dea 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1398,22 +1398,22 @@ static int __init intel_uncore_init(void)
*/
if (!cret) {
ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
- "PERF_X86_UNCORE_PREP",
- uncore_cpu_prepare, NULL);
+ "perf/x86/intel/uncore:prepare",
+ uncore_cpu_prepare, NULL);
if (ret)
goto err;
} else {
cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
- "PERF_X86_UNCORE_PREP",
+ "perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
}
first_init = 1;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
- "AP_PERF_X86_UNCORE_STARTING",
+ "perf/x86/uncore:starting",
uncore_cpu_starting, uncore_cpu_dying);
first_init = 0;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
- "AP_PERF_X86_UNCORE_ONLINE",
+ "perf/x86/uncore:online",
uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 456316f6c868..202a7817beaf 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -234,7 +234,7 @@ static __init int apbt_late_init(void)
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
- return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+ return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 200af5ae9662..5a35f208ed95 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -191,7 +191,7 @@ static int x2apic_cluster_probe(void)
if (!x2apic_mode)
return 0;
- ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+ ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
x2apic_prepare_cpu, x2apic_dead_cpu);
if (ret < 0) {
pr_err("Failed to register X2APIC_PREPARE\n");
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 274fab99169d..38c8fd684d38 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1051,11 +1051,11 @@ static __init int hpet_late_init(void)
return 0;
/* This notifier should be called after workqueue is ready */
- ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+ ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
hpet_cpuhp_online, NULL);
if (ret)
return ret;
- ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+ ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
hpet_cpuhp_dead);
if (ret)
goto err_cpuhp;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index f5e3ff835cc8..ef688804f80d 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -224,7 +224,6 @@ static int __init msr_init(void)
return 0;
out_class:
- cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 8402907825b0..b868fa1b812b 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -408,7 +408,7 @@ static __init int tboot_late_init(void)
tboot_create_trampoline();
atomic_set(&ap_wfs_count, 0);
- cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+ cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "x86/tboot:dying", NULL,
tboot_dying_cpu);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("tboot_log", S_IRUSR,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 445c51b6cf6d..6414fa6cb9fd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5855,7 +5855,7 @@ static void kvm_timer_init(void)
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
- cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+ cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ced7027b3fbc..51ef95232725 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1529,11 +1529,11 @@ static int xen_cpuhp_setup(void)
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
- "XEN_HVM_GUEST_PREPARE",
+ "x86/xen/hvm_guest:prepare",
xen_cpu_up_prepare, xen_cpu_dead);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "XEN_HVM_GUEST_ONLINE",
+ "x86/xen/hvm_guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 0fecc8a2c0b5..ff1d81385ed7 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -422,7 +422,7 @@ static int __init xtensa_pmu_init(void)
int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
- "AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
+ "perf/xtensa:starting", xtensa_pmu_setup,
NULL);
if (ret) {
pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 231633328dfa..c49da15d9790 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1796,7 +1796,7 @@ static int __init cci_platform_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
- "AP_PERF_ARM_CCI_ONLINE", NULL,
+ "perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
if (ret)
return ret;
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index d1074d9b38ba..4d6a2b7e4d3f 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1562,7 +1562,7 @@ static int __init arm_ccn_init(void)
int i, ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
- "AP_PERF_ARM_CCN_ONLINE", NULL,
+ "perf/arm/ccn:online", NULL,
arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
@@ -1570,7 +1570,10 @@ static int __init arm_ccn_init(void)
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
- return platform_driver_register(&arm_ccn_driver);
+ ret = platform_driver_register(&arm_ccn_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+ return ret;
}
static void __exit arm_ccn_exit(void)
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index a49748d826c0..2b7e87134d1a 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -309,7 +309,7 @@ static int __init arc_clockevent_setup(struct device_node *node)
}
ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
- "AP_ARC_TIMER_STARTING",
+ "clockevents/arc/timer:starting",
arc_timer_starting_cpu,
arc_timer_dying_cpu);
if (ret) {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 02fef6830e72..cdeca850f29e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -738,7 +738,7 @@ static int __init arch_timer_register(void)
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
- "AP_ARM_ARCH_TIMER_STARTING",
+ "clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 8da03298f844..bbfeb2800a94 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_irq;
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
- "AP_ARM_GLOBAL_TIMER_STARTING",
+ "clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 89f1c2edbe02..01f3f5a59bc6 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu)
static int __init dummy_timer_register(void)
{
return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
- "AP_DUMMY_TIMER_STARTING",
+ "clockevents/dummy_timer:starting",
dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8f3488b80896..b45b72b95861 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -552,7 +552,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
- "AP_EXYNOS4_MCT_TIMER_STARTING",
+ "clockevents/exynos4/mct_timer:starting",
exynos4_mct_starting_cpu,
exynos4_mct_dying_cpu);
if (err)
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 54e1665aa03c..4e4146f69845 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node)
}
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
- "AP_JCORE_TIMER_STARTING",
+ "clockevents/jcore:starting",
jcore_pit_local_init, NULL);
return 0;
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index a80ab3e446b7..172f43d4bc1a 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void)
/* Hook cpu boot to configure the CPU's timers */
return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
- "AP_METAG_TIMER_STARTING",
+ "clockevents/metag:starting",
arch_timer_starting_cpu, NULL);
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 7a960cd01104..d9278847ffb2 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -120,8 +120,8 @@ static int gic_clockevent_init(void)
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
- "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
- gic_dying_cpu);
+ "clockevents/mips/gic/timer:starting",
+ gic_starting_cpu, gic_dying_cpu);
return 0;
}
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 3283cfa2aa52..3bf65fff5c08 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
} else {
/* Install and invoke hotplug callbacks */
res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
- "AP_QCOM_TIMER_STARTING",
+ "clockevents/qcom/timer:starting",
msm_local_timer_starting_cpu,
msm_local_timer_dying_cpu);
if (res) {
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 3c39e6f45971..4440aefc59cd 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
}
res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
- "AP_ARMADA_TIMER_STARTING",
+ "clockevents/armada:starting",
armada_370_xp_timer_starting_cpu,
armada_370_xp_timer_dying_cpu);
if (res) {
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 4334e0330ada..3c23e1744f4a 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void)
/* Install and invoke hotplug callbacks */
return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
- "AP_MARCO_TIMER_STARTING",
+ "clockevents/marco:starting",
sirfsoc_local_timer_starting_cpu,
sirfsoc_local_timer_dying_cpu);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 3fe368b23d15..a51b6b64ecdf 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -804,10 +804,10 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (!etm_count++) {
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
- "AP_ARM_CORESIGHT_STARTING",
+ "arm/coresight:starting",
etm_starting_cpu, etm_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "AP_ARM_CORESIGHT_ONLINE",
+ "arm/coresight:online",
etm_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4db8d6a4d0cb..031480f2c34d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -986,11 +986,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(dev, "ETM arch init failed\n");
if (!etm4_count++) {
- cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
- "AP_ARM_CORESIGHT4_STARTING",
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "arm/coresight4:starting",
etm4_starting_cpu, etm4_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "AP_ARM_CORESIGHT4_ONLINE",
+ "arm/coresight4:online",
etm4_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;
@@ -1037,7 +1037,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
- cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 8bcee65a0b8c..eb0d4d41b156 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -578,13 +578,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
set_smp_cross_call(armada_mpic_send_doorbell);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
- "AP_IRQ_ARMADA_XP_STARTING",
+ "irqchip/armada/ipi:starting",
armada_xp_mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
- "AP_IRQ_ARMADA_CASC_STARTING",
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "irqchip/armada/cascade:starting",
mpic_cascaded_starting_cpu, NULL);
#endif
irq_set_chained_handler(parent_irq,
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index d96b2c947e74..e7463e3c0814 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -245,7 +245,7 @@ bcm2836_arm_irqchip_smp_init(void)
#ifdef CONFIG_SMP
/* Unmask IPIs to the boot CPU. */
cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
- "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+ "irqchip/bcm2836:starting", bcm2836_cpu_starting,
bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 26e1d7fafb1e..c132f29322cc 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -632,9 +632,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
- "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
- NULL);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gicv3:starting",
+ gic_starting_cpu, NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d6c404b3584d..1b1df4f770bd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1191,7 +1191,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
set_smp_cross_call(gic_raise_softirq);
#endif
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
- "AP_IRQ_GIC_STARTING",
+ "irqchip/arm/gic:starting",
gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 021b0e0833c1..c1b4ee955dbe 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -407,7 +407,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
set_handle_irq(hip04_handle_irq);
hip04_irq_dist_init(&hip04_data);
- cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+ cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
hip04_irq_starting_cpu, NULL);
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 9719caf7437c..a41896468cb3 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -127,7 +127,7 @@ static int __init ledtrig_cpu_init(void)
register_syscore_ops(&ledtrig_cpu_syscore_ops);
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
ledtrig_online_cpu, ledtrig_prepare_down_cpu);
if (ret < 0)
pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5deeda61d6d3..4a105006ca63 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2484,13 +2484,13 @@ static __init int virtio_net_driver_init(void)
{
int ret;
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
virtnet_cpu_online,
virtnet_cpu_down_prep);
if (ret < 0)
goto out;
virtionet_online = ret;
- ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
+ ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b57294566..6d9335865880 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1084,7 +1084,7 @@ static int arm_pmu_hp_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
- "AP_PERF_ARM_STARTING",
+ "perf/arm/pmu:starting",
arm_perf_starting_cpu, NULL);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 0990130821fa..c639d5a02656 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -127,13 +127,6 @@ module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
-static int bnx2fc_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu);
-/* notification function for CPU hotplug events */
-static struct notifier_block bnx2fc_cpu_notifier = {
- .notifier_call = bnx2fc_cpu_callback,
-};
-
static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
{
return ((struct bnx2fc_interface *)
@@ -2622,37 +2615,19 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
-/**
- * bnx2fc_cpu_callback - Handler for CPU hotplug events
- *
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for fcoe
- *
- * Returns NOTIFY_OK always.
- */
-static int bnx2fc_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+
+static int bnx2fc_cpu_online(unsigned int cpu)
{
- unsigned cpu = (unsigned long)hcpu;
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ return 0;
+}
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- printk(PFX "CPU %x online: Create Rx thread\n", cpu);
- bnx2fc_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2fc_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+static int bnx2fc_cpu_dead(unsigned int cpu)
+{
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ return 0;
}
static int bnx2fc_slave_configure(struct scsi_device *sdev)
@@ -2664,6 +2639,8 @@ static int bnx2fc_slave_configure(struct scsi_device *sdev)
return 0;
}
+static enum cpuhp_state bnx2fc_online_state;
+
/**
* bnx2fc_mod_init - module init entry point
*
@@ -2724,21 +2701,31 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
- cpu_notifier_register_begin();
+ get_online_cpus();
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
bnx2fc_percpu_thread_create(cpu);
- }
- /* Initialize per CPU interrupt thread */
- __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "scsi/bnx2fc:online",
+ bnx2fc_cpu_online, NULL);
+ if (rc < 0)
+ goto stop_threads;
+ bnx2fc_online_state = rc;
- cpu_notifier_register_done();
+ cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
+ NULL, bnx2fc_cpu_dead);
+ put_online_cpus();
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
return 0;
+stop_threads:
+ for_each_online_cpu(cpu)
+ bnx2fc_percpu_thread_destroy(cpu);
+ put_online_cpus();
+ kthread_stop(l2_thread);
free_wq:
destroy_workqueue(bnx2fc_wq);
release_bt:
@@ -2797,16 +2784,16 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
- cpu_notifier_register_begin();
-
+ get_online_cpus();
/* Destroy per cpu threads */
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_destroy(cpu);
}
- __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ cpuhp_remove_state_nocalls(bnx2fc_online_state);
+ cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
- cpu_notifier_register_done();
+ put_online_cpus();
destroy_workqueue(bnx2fc_wq);
/*
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index c8b410c24cf0..86afc002814c 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -70,14 +70,6 @@ u64 iscsi_error_mask = 0x00;
DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
-static int bnx2i_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu);
-/* notification function for CPU hotplug events */
-static struct notifier_block bnx2i_cpu_notifier = {
- .notifier_call = bnx2i_cpu_callback,
-};
-
-
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
@@ -461,41 +453,21 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
-
-/**
- * bnx2i_cpu_callback - Handler for CPU hotplug events
- *
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for iSCSI
- *
- * Returns NOTIFY_OK always.
- */
-static int bnx2i_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bnx2i_cpu_online(unsigned int cpu)
{
- unsigned cpu = (unsigned long)hcpu;
+ pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
+ bnx2i_percpu_thread_create(cpu);
+ return 0;
+}
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
- cpu);
- bnx2i_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2i_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+static int bnx2i_cpu_dead(unsigned int cpu)
+{
+ pr_info("CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2i_percpu_thread_destroy(cpu);
+ return 0;
}
+static enum cpuhp_state bnx2i_online_state;
/**
* bnx2i_mod_init - module init entry point
@@ -539,18 +511,28 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
- cpu_notifier_register_begin();
+ get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_create(cpu);
- /* Initialize per CPU interrupt thread */
- __register_hotcpu_notifier(&bnx2i_cpu_notifier);
-
- cpu_notifier_register_done();
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "scsi/bnx2i:online",
+ bnx2i_cpu_online, NULL);
+ if (err < 0)
+ goto remove_threads;
+ bnx2i_online_state = err;
+ cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
+ NULL, bnx2i_cpu_dead);
+ put_online_cpus();
return 0;
+remove_threads:
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_destroy(cpu);
+ put_online_cpus();
+ cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
out:
@@ -587,14 +569,14 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
- cpu_notifier_register_begin();
+ get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
- __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
-
- cpu_notifier_register_done();
+ cpuhp_remove_state_nocalls(bnx2i_online_state);
+ cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
+ put_online_cpus();
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 19ead8d17e55..5eda21d903e9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1612,30 +1612,29 @@ static int qedi_percpu_io_thread(void *arg)
return 0;
}
-static void qedi_percpu_thread_create(unsigned int cpu)
+static int qedi_cpu_online(unsigned int cpu)
{
- struct qedi_percpu_s *p;
+ struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct task_struct *thread;
- p = &per_cpu(qedi_percpu, cpu);
-
thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
cpu_to_node(cpu),
"qedi_thread/%d", cpu);
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- p->iothread = thread;
- wake_up_process(thread);
- }
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
}
-static void qedi_percpu_thread_destroy(unsigned int cpu)
+static int qedi_cpu_offline(unsigned int cpu)
{
- struct qedi_percpu_s *p;
- struct task_struct *thread;
+ struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct qedi_work *work, *tmp;
+ struct task_struct *thread;
- p = &per_cpu(qedi_percpu, cpu);
spin_lock_bh(&p->p_work_lock);
thread = p->iothread;
p->iothread = NULL;
@@ -1650,35 +1649,9 @@ static void qedi_percpu_thread_destroy(unsigned int cpu)
spin_unlock_bh(&p->p_work_lock);
if (thread)
kthread_stop(thread);
+ return 0;
}
-static int qedi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- QEDI_ERR(NULL, "CPU %d online.\n", cpu);
- qedi_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
- qedi_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block qedi_cpu_notifier = {
- .notifier_call = qedi_cpu_callback,
-};
-
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
struct qed_ll2_params params;
@@ -2038,6 +2011,8 @@ static struct pci_device_id qedi_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+static enum cpuhp_state qedi_cpuhp_state;
+
static struct pci_driver qedi_pci_driver = {
.name = QEDI_MODULE_NAME,
.id_table = qedi_pci_tbl,
@@ -2047,16 +2022,13 @@ static struct pci_driver qedi_pci_driver = {
static int __init qedi_init(void)
{
- int rc = 0;
- int ret;
struct qedi_percpu_s *p;
- unsigned int cpu = 0;
+ int cpu, rc = 0;
qedi_ops = qed_get_iscsi_ops();
if (!qedi_ops) {
QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
- rc = -EINVAL;
- goto exit_qedi_init_0;
+ return -EINVAL;
}
#ifdef CONFIG_DEBUG_FS
@@ -2070,15 +2042,6 @@ static int __init qedi_init(void)
goto exit_qedi_init_1;
}
- register_hotcpu_notifier(&qedi_cpu_notifier);
-
- ret = pci_register_driver(&qedi_pci_driver);
- if (ret) {
- QEDI_ERR(NULL, "Failed to register driver\n");
- rc = -EINVAL;
- goto exit_qedi_init_2;
- }
-
for_each_possible_cpu(cpu) {
p = &per_cpu(qedi_percpu, cpu);
INIT_LIST_HEAD(&p->work_list);
@@ -2086,11 +2049,22 @@ static int __init qedi_init(void)
p->iothread = NULL;
}
- for_each_online_cpu(cpu)
- qedi_percpu_thread_create(cpu);
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
+ qedi_cpu_online, qedi_cpu_offline);
+ if (rc < 0)
+ goto exit_qedi_init_2;
+ qedi_cpuhp_state = rc;
- return rc;
+ rc = pci_register_driver(&qedi_pci_driver);
+ if (rc) {
+ QEDI_ERR(NULL, "Failed to register driver\n");
+ goto exit_qedi_hp;
+ }
+
+ return 0;
+exit_qedi_hp:
+ cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
@@ -2098,19 +2072,13 @@ static int __init qedi_init(void)
qedi_dbg_exit();
#endif
qed_put_iscsi_ops();
-exit_qedi_init_0:
return rc;
}
static void __exit qedi_cleanup(void)
{
- unsigned int cpu = 0;
-
- for_each_online_cpu(cpu)
- qedi_percpu_thread_destroy(cpu);
-
pci_unregister_driver(&qedi_pci_driver);
- unregister_hotcpu_notifier(&qedi_cpu_notifier);
+ cpuhp_remove_state(qedi_cpuhp_state);
iscsi_unregister_transport(&qedi_iscsi_transport);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 6b9cf06e8df2..427e2198bb9e 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -967,48 +967,38 @@ cfs_cpt_table_create_pattern(char *pattern)
}
#ifdef CONFIG_HOTPLUG_CPU
-static int
-cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- bool warn;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- spin_lock(&cpt_data.cpt_lock);
- cpt_data.cpt_version++;
- spin_unlock(&cpt_data.cpt_lock);
- /* Fall through */
- default:
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
- CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
- cpu, action);
- break;
- }
+static enum cpuhp_state lustre_cpu_online;
- mutex_lock(&cpt_data.cpt_mutex);
- /* if all HTs in a core are offline, it may break affinity */
- cpumask_copy(cpt_data.cpt_cpumask,
- topology_sibling_cpumask(cpu));
- warn = cpumask_any_and(cpt_data.cpt_cpumask,
- cpu_online_mask) >= nr_cpu_ids;
- mutex_unlock(&cpt_data.cpt_mutex);
- CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
- cpu, action);
- }
+static void cfs_cpu_incr_cpt_version(void)
+{
+ spin_lock(&cpt_data.cpt_lock);
+ cpt_data.cpt_version++;
+ spin_unlock(&cpt_data.cpt_lock);
+}
- return NOTIFY_OK;
+static int cfs_cpu_online(unsigned int cpu)
+{
+ cfs_cpu_incr_cpt_version();
+ return 0;
}
-static struct notifier_block cfs_cpu_notifier = {
- .notifier_call = cfs_cpu_notify,
- .priority = 0
-};
+static int cfs_cpu_dead(unsigned int cpu)
+{
+ bool warn;
+
+ cfs_cpu_incr_cpt_version();
+ mutex_lock(&cpt_data.cpt_mutex);
+ /* if all HTs in a core are offline, it may break affinity */
+ cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu));
+ warn = cpumask_any_and(cpt_data.cpt_cpumask,
+ cpu_online_mask) >= nr_cpu_ids;
+ mutex_unlock(&cpt_data.cpt_mutex);
+ CDEBUG(warn ? D_WARNING : D_INFO,
+ "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
+ cpu);
+ return 0;
+}
#endif
void
@@ -1018,7 +1008,9 @@ cfs_cpu_fini(void)
cfs_cpt_table_free(cfs_cpt_table);
#ifdef CONFIG_HOTPLUG_CPU
- unregister_hotcpu_notifier(&cfs_cpu_notifier);
+ if (lustre_cpu_online > 0)
+ cpuhp_remove_state_nocalls(lustre_cpu_online);
+ cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#endif
if (cpt_data.cpt_cpumask)
LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
@@ -1027,6 +1019,8 @@ cfs_cpu_fini(void)
int
cfs_cpu_init(void)
{
+ int ret = 0;
+
LASSERT(!cfs_cpt_table);
memset(&cpt_data, 0, sizeof(cpt_data));
@@ -1041,8 +1035,19 @@ cfs_cpu_init(void)
mutex_init(&cpt_data.cpt_mutex);
#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&cfs_cpu_notifier);
+ ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
+ "staging/lustre/cfe:dead", NULL,
+ cfs_cpu_dead);
+ if (ret < 0)
+ goto failed;
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "staging/lustre/cfe:online",
+ cfs_cpu_online, NULL);
+ if (ret < 0)
+ goto failed;
+ lustre_cpu_online = ret;
#endif
+ ret = -EINVAL;
if (*cpu_pattern) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
@@ -1075,7 +1080,7 @@ cfs_cpu_init(void)
failed:
cfs_cpu_fini();
- return -1;
+ return ret;
}
#endif
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 7ef27c6ed72f..c03f9c86c7e3 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -445,7 +445,7 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
- "CPUHP_XEN_EVTCHN_PREPARE",
+ "xen/evtchn:prepare",
xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
out:
put_cpu();
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 09807c2ce328..21f9c74496e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -57,9 +57,6 @@ struct notifier_block;
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
@@ -80,80 +77,14 @@ struct notifier_block;
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-/* Need to know about CPUs going up/down? */
-#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
-#define cpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = pri }; \
- register_cpu_notifier(&fn##_nb); \
-}
-
-#define __cpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = pri }; \
- __register_cpu_notifier(&fn##_nb); \
-}
-
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-extern void unregister_cpu_notifier(struct notifier_block *nb);
-extern void __unregister_cpu_notifier(struct notifier_block *nb);
-
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-#endif
-
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
-#define cpu_notifier_register_begin cpu_maps_update_begin
-#define cpu_notifier_register_done cpu_maps_update_done
-
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
@@ -162,14 +93,6 @@ static inline void cpu_maps_update_done(void)
{
}
-static inline void cpu_notifier_register_begin(void)
-{
-}
-
-static inline void cpu_notifier_register_done(void)
-{
-}
-
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -182,12 +105,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
-#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
-#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
-#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
-#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
-#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -199,13 +116,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
-#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
-#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
-#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2ab7bf53d529..20bfefbe7594 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -41,6 +41,9 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
+ CPUHP_LUSTRE_CFS_DEAD,
+ CPUHP_SCSI_BNX2FC_DEAD,
+ CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -56,7 +59,6 @@ enum cpuhp_state {
CPUHP_POWERPC_MMU_CTX_PREPARE,
CPUHP_XEN_PREPARE,
CPUHP_XEN_EVTCHN_PREPARE,
- CPUHP_NOTIFY_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_BLK_MQ_PREPARE,
@@ -71,7 +73,6 @@ enum cpuhp_state {
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
- CPUHP_NOTF_ERR_INJ_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
@@ -79,10 +80,8 @@ enum cpuhp_state {
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_IRQ_GIC_STARTING,
- CPUHP_AP_IRQ_GICV3_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
- CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
@@ -118,7 +117,6 @@ enum cpuhp_state {
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
- CPUHP_AP_ARM_CORESIGHT4_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
@@ -142,7 +140,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
- CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
CPUHP_AP_X86_HPET_ONLINE,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5339aca811d2..042fd7e8e030 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -183,23 +183,16 @@ EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
- * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
- * hotplug callback (un)registration performed using __register_cpu_notifier()
- * or __unregister_cpu_notifier().
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
-EXPORT_SYMBOL(cpu_notifier_register_begin);
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
-EXPORT_SYMBOL(cpu_notifier_register_done);
-
-static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
@@ -349,66 +342,7 @@ void cpu_hotplug_enable(void)
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
-/* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
-{
- int ret;
- cpu_maps_update_begin();
- ret = raw_notifier_chain_register(&cpu_chain, nb);
- cpu_maps_update_done();
- return ret;
-}
-
-int __register_cpu_notifier(struct notifier_block *nb)
-{
- return raw_notifier_chain_register(&cpu_chain, nb);
-}
-
-static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
- int *nr_calls)
-{
- unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
- void *hcpu = (void *)(long)cpu;
-
- int ret;
-
- ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
- nr_calls);
-
- return notifier_to_errno(ret);
-}
-
-static int cpu_notify(unsigned long val, unsigned int cpu)
-{
- return __cpu_notify(val, cpu, -1, NULL);
-}
-
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
- BUG_ON(cpu_notify(val, cpu));
-}
-
/* Notifier wrappers for transitioning to state machine */
-static int notify_prepare(unsigned int cpu)
-{
- int nr_calls = 0;
- int ret;
-
- ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
- if (ret) {
- nr_calls--;
- printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
- __func__, cpu);
- __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
- }
- return ret;
-}
-
-static int notify_online(unsigned int cpu)
-{
- cpu_notify(CPU_ONLINE, cpu);
- return 0;
-}
static int bringup_wait_for_ap(unsigned int cpu)
{
@@ -433,10 +367,8 @@ static int bringup_cpu(unsigned int cpu)
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
irq_unlock_sparse();
- if (ret) {
- cpu_notify(CPU_UP_CANCELED, cpu);
+ if (ret)
return ret;
- }
ret = bringup_wait_for_ap(cpu);
BUG_ON(!cpu_online(cpu));
return ret;
@@ -565,11 +497,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
undo_cpu_down(cpu, st);
- /*
- * This is a momentary workaround to keep the notifier users
- * happy. Will go away once we got rid of the notifiers.
- */
- cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
st->rollback = false;
} else {
/* Cannot happen .... */
@@ -659,22 +586,6 @@ void __init cpuhp_threads_init(void)
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
-EXPORT_SYMBOL(register_cpu_notifier);
-EXPORT_SYMBOL(__register_cpu_notifier);
-void unregister_cpu_notifier(struct notifier_block *nb)
-{
- cpu_maps_update_begin();
- raw_notifier_chain_unregister(&cpu_chain, nb);
- cpu_maps_update_done();
-}
-EXPORT_SYMBOL(unregister_cpu_notifier);
-
-void __unregister_cpu_notifier(struct notifier_block *nb)
-{
- raw_notifier_chain_unregister(&cpu_chain, nb);
-}
-EXPORT_SYMBOL(__unregister_cpu_notifier);
-
#ifdef CONFIG_HOTPLUG_CPU
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
@@ -741,20 +652,6 @@ static inline void check_for_tasks(int dead_cpu)
read_unlock(&tasklist_lock);
}
-static int notify_down_prepare(unsigned int cpu)
-{
- int err, nr_calls = 0;
-
- err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
- if (err) {
- nr_calls--;
- __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
- pr_warn("%s: attempt to take down CPU %u failed\n",
- __func__, cpu);
- }
- return err;
-}
-
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
@@ -833,13 +730,6 @@ static int takedown_cpu(unsigned int cpu)
return 0;
}
-static int notify_dead(unsigned int cpu)
-{
- cpu_notify_nofail(CPU_DEAD, cpu);
- check_for_tasks(cpu);
- return 0;
-}
-
static void cpuhp_complete_idle_dead(void *arg)
{
struct cpuhp_cpu_state *st = arg;
@@ -863,9 +753,7 @@ void cpuhp_report_idle_dead(void)
}
#else
-#define notify_down_prepare NULL
#define takedown_cpu NULL
-#define notify_dead NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU
@@ -924,9 +812,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
cpu_hotplug_done();
- /* This post dead nonsense must die */
- if (!ret && hasdied)
- cpu_notify_nofail(CPU_POST_DEAD, cpu);
return ret;
}
@@ -1292,17 +1177,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
.teardown.single = rcutree_dead_cpu,
},
/*
- * Preparatory and dead notifiers. Will be replaced once the notifiers
- * are converted to states.
- */
- [CPUHP_NOTIFY_PREPARE] = {
- .name = "notify:prepare",
- .startup.single = notify_prepare,
- .teardown.single = notify_dead,
- .skip_onerr = true,
- .cant_stop = true,
- },
- /*
* On the tear-down path, timers_dead_cpu() must be invoked
* before blk_mq_queue_reinit_notify() from notify_dead(),
* otherwise a RCU stall occurs.
@@ -1391,17 +1265,6 @@ static struct cpuhp_step cpuhp_ap_states[] = {
.startup.single = rcutree_online_cpu,
.teardown.single = rcutree_offline_cpu,
},
-
- /*
- * Online/down_prepare notifiers. Will be removed once the notifiers
- * are converted to states.
- */
- [CPUHP_AP_NOTIFY_ONLINE] = {
- .name = "notify:online",
- .startup.single = notify_online,
- .teardown.single = notify_down_prepare,
- .skip_onerr = true,
- },
#endif
/*
* The dynamically registered state space is here
@@ -1432,23 +1295,53 @@ static int cpuhp_cb_check(enum cpuhp_state state)
return 0;
}
-static void cpuhp_store_callbacks(enum cpuhp_state state,
- const char *name,
- int (*startup)(unsigned int cpu),
- int (*teardown)(unsigned int cpu),
- bool multi_instance)
+/*
+ * Returns a free for dynamic slot assignment of the Online state. The states
+ * are protected by the cpuhp_slot_states mutex and an empty slot is identified
+ * by having no name assigned.
+ */
+static int cpuhp_reserve_state(enum cpuhp_state state)
+{
+ enum cpuhp_state i;
+
+ for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
+ if (!cpuhp_ap_states[i].name)
+ return i;
+ }
+ WARN(1, "No more dynamic states available for CPU hotplug\n");
+ return -ENOSPC;
+}
+
+static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu),
+ bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
+ int ret = 0;
mutex_lock(&cpuhp_state_mutex);
+
+ if (state == CPUHP_AP_ONLINE_DYN) {
+ ret = cpuhp_reserve_state(state);
+ if (ret < 0)
+ goto out;
+ state = ret;
+ }
sp = cpuhp_get_step(state);
+ if (name && sp->name) {
+ ret = -EBUSY;
+ goto out;
+ }
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
+out:
mutex_unlock(&cpuhp_state_mutex);
+ return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@@ -1509,29 +1402,6 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
}
}
-/*
- * Returns a free for dynamic slot assignment of the Online state. The states
- * are protected by the cpuhp_slot_states mutex and an empty slot is identified
- * by having no name assigned.
- */
-static int cpuhp_reserve_state(enum cpuhp_state state)
-{
- enum cpuhp_state i;
-
- mutex_lock(&cpuhp_state_mutex);
- for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
- if (cpuhp_ap_states[i].name)
- continue;
-
- cpuhp_ap_states[i].name = "Reserved";
- mutex_unlock(&cpuhp_state_mutex);
- return i;
- }
- mutex_unlock(&cpuhp_state_mutex);
- WARN(1, "No more dynamic states available for CPU hotplug\n");
- return -ENOSPC;
-}
-
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{
@@ -1580,11 +1450,13 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
* __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
- * @state: The state to setup
- * @invoke: If true, the startup function is invoked for cpus where
- * cpu state >= @state
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @state: The state to setup
+ * @invoke: If true, the startup function is invoked for cpus where
+ * cpu state >= @state
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ * @multi_instance: State is set up for multiple instances which get
+ * added afterwards.
*
* Returns:
* On success:
@@ -1599,25 +1471,16 @@ int __cpuhp_setup_state(enum cpuhp_state state,
bool multi_instance)
{
int cpu, ret = 0;
- int dyn_state = 0;
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
get_online_cpus();
- /* currently assignments for the ONLINE state are possible */
- if (state == CPUHP_AP_ONLINE_DYN) {
- dyn_state = 1;
- ret = cpuhp_reserve_state(state);
- if (ret < 0)
- goto out;
- state = ret;
- }
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+ multi_instance);
- cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
-
- if (!invoke || !startup)
+ if (ret || !invoke || !startup)
goto out;
/*
@@ -1641,7 +1504,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
}
out:
put_online_cpus();
- if (!ret && dyn_state)
+ /*
+ * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+ * dynamically allocated state in case of success.
+ */
+ if (!ret && state == CPUHP_AP_ONLINE_DYN)
return state;
return ret;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cb66a4648840..b06848a104e6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1538,30 +1538,6 @@ config NOTIFIER_ERROR_INJECTION
Say N if unsure.
-config CPU_NOTIFIER_ERROR_INJECT
- tristate "CPU notifier error injection module"
- depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
- help
- This option provides a kernel module that can be used to test
- the error handling of the cpu notifiers by injecting artificial
- errors to CPU notifier chain callbacks. It is controlled through
- debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
-
- If the notifier call chain should be failed with some events
- notified, write the error code to "actions/<notifier event>/error".
-
- Example: Inject CPU offline error (-1 == -EPERM)
-
- # cd /sys/kernel/debug/notifier-error-inject/cpu
- # echo -1 > actions/CPU_DOWN_PREPARE/error
- # echo 0 > /sys/devices/system/cpu/cpu1/online
- bash: echo: write error: Operation not permitted
-
- To compile this code as a module, choose M here: the module will
- be called cpu-notifier-error-inject.
-
- If unsure, say N.
-
config PM_NOTIFIER_ERROR_INJECT
tristate "PM notifier error injection module"
depends on PM && NOTIFIER_ERROR_INJECTION
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3aeebd..bc4073a8cd08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
-obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
deleted file mode 100644
index 0e2c9a1e958a..000000000000
--- a/lib/cpu-notifier-error-inject.c
+++ /dev/null
@@ -1,84 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-
-#include "notifier-error-inject.h"
-
-static int priority;
-module_param(priority, int, 0);
-MODULE_PARM_DESC(priority, "specify cpu notifier priority");
-
-#define UP_PREPARE 0
-#define UP_PREPARE_FROZEN 0
-#define DOWN_PREPARE 0
-#define DOWN_PREPARE_FROZEN 0
-
-static struct notifier_err_inject cpu_notifier_err_inject = {
- .actions = {
- { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) },
- { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) },
- { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) },
- { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) },
- {}
- }
-};
-
-static int notf_err_handle(struct notifier_err_inject_action *action)
-{
- int ret;
-
- ret = action->error;
- if (ret)
- pr_info("Injecting error (%d) to %s\n", ret, action->name);
- return ret;
-}
-
-static int notf_err_inj_up_prepare(unsigned int cpu)
-{
- if (!cpuhp_tasks_frozen)
- return notf_err_handle(&cpu_notifier_err_inject.actions[0]);
- else
- return notf_err_handle(&cpu_notifier_err_inject.actions[1]);
-}
-
-static int notf_err_inj_dead(unsigned int cpu)
-{
- if (!cpuhp_tasks_frozen)
- return notf_err_handle(&cpu_notifier_err_inject.actions[2]);
- else
- return notf_err_handle(&cpu_notifier_err_inject.actions[3]);
-}
-
-static struct dentry *dir;
-
-static int err_inject_init(void)
-{
- int err;
-
- dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
- &cpu_notifier_err_inject, priority);
- if (IS_ERR(dir))
- return PTR_ERR(dir);
-
- err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE,
- "cpu-err-notif:prepare",
- notf_err_inj_up_prepare,
- notf_err_inj_dead);
- if (err)
- debugfs_remove_recursive(dir);
-
- return err;
-}
-
-static void err_inject_exit(void)
-{
- cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE);
- debugfs_remove_recursive(dir);
-}
-
-module_init(err_inject_init);
-module_exit(err_inject_exit);
-
-MODULE_DESCRIPTION("CPU notifier error injection module");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Akinobu Mita <[email protected]>");
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index ae95fc0e3214..5b4f60d43314 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -456,7 +456,7 @@ int kvm_timer_hyp_init(void)
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
- "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+ "kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
return err;
}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 8cebfbc19e90..5114391b7e5a 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -428,7 +428,7 @@ int kvm_vgic_hyp_init(void)
}
ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- "AP_KVM_ARM_VGIC_INIT_STARTING",
+ "kvm/arm/vgic:starting",
vgic_init_cpu_starting, vgic_init_cpu_dying);
if (ret) {
kvm_err("Cannot register vgic CPU notifier\n");
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de102cae7125..ec298b7ca556 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3944,7 +3944,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_1;
}
- r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
+ r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
kvm_starting_cpu, kvm_dying_cpu);
if (r)
goto out_free_2;
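
For reference, the registration pattern that all of the conversions above follow is
roughly the sketch below. The "foo" subsystem, its callbacks and the "subsys/foo:online"
label are placeholders for illustration, not code from any driver in this series; only
the online side is shown, since DEAD/PREPARE states still need a fixed entry in
enum cpuhp_state (as the new CPUHP_LUSTRE_CFS_DEAD et al above show).

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpuhotplug.h>

/* Placeholder callbacks; a real driver sets up/tears down per-CPU state here. */
static int foo_cpu_online(unsigned int cpu)
{
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	return 0;
}

static int foo_hp_state;

static int __init foo_init(void)
{
	int ret;

	/*
	 * Request a slot in the dynamic online range. The human readable
	 * "subsystem/driver:state" label replaces the old enum-name strings,
	 * and the positive return value is the allocated state id, which is
	 * needed later for removal.
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
					foo_cpu_online, foo_cpu_offline);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;
	return 0;
}

static void __exit foo_exit(void)
{
	cpuhp_remove_state_nocalls(foo_hp_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");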
On 2016.12.25 at 14:39 +0100, Thomas Gleixner wrote:
> Linus,
>
> please pull the latest smp-urgent-for-linus git tree from:
>
> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git smp-urgent-for-linus
>
> Thomas Gleixner (11):
> cpu/hotplug: Prevent overwriting of callbacks
The following commit:
commit dc280d93623927570da279e99393879dbbab39e7
Author: Thomas Gleixner <[email protected]>
Date: Wed Dec 21 20:19:49 2016 +0100
cpu/hotplug: Prevent overwriting of callbacks
results in an early Oops during boot on my AMD machine.
I haven't written down the entire backtrace, but basically things start to
go wrong in mce_threshold_create_device() from
arch/x86/kernel/cpu/mcheck/mce_amd.c.
# CONFIG_HOTPLUG_CPU is not set
Reverting the commit "fixes" the issue for me.
--
Markus
On 2016.12.26 at 08:45 +0100, Markus Trippelsdorf wrote:
> On 2016.12.25 at 14:39 +0100, Thomas Gleixner wrote:
> > Linus,
> >
> > please pull the latest smp-urgent-for-linus git tree from:
> >
> > git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git smp-urgent-for-linus
> >
> > Thomas Gleixner (11):
> > cpu/hotplug: Prevent overwriting of callbacks
>
> The following commit:
>
> commit dc280d93623927570da279e99393879dbbab39e7
> Author: Thomas Gleixner <[email protected]>
> Date: Wed Dec 21 20:19:49 2016 +0100
>
> cpu/hotplug: Prevent overwriting of callbacks
>
> results in an early Oops during boot on my AMD machine.
> I haven't written down the entire backtrace, but basically things start to
> go wrong in mce_threshold_create_device() from
> arch/x86/kernel/cpu/mcheck/mce_amd.c.
>
> # CONFIG_HOTPLUG_CPU is not set
>
> Reverting the commit "fixes" the issue for me.
CCing Sebastian and Borislav.
--
Markus
On 2016.12.26 at 12:06 +0100, Markus Trippelsdorf wrote:
> On 2016.12.26 at 08:45 +0100, Markus Trippelsdorf wrote:
> > On 2016.12.25 at 14:39 +0100, Thomas Gleixner wrote:
> > > Linus,
> > >
> > > please pull the latest smp-urgent-for-linus git tree from:
> > >
> > > git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git smp-urgent-for-linus
> > >
> > > Thomas Gleixner (11):
> > > cpu/hotplug: Prevent overwriting of callbacks
> >
> > The following commit:
> >
> > commit dc280d93623927570da279e99393879dbbab39e7
> > Author: Thomas Gleixner <[email protected]>
> > Date: Wed Dec 21 20:19:49 2016 +0100
> >
> > cpu/hotplug: Prevent overwriting of callbacks
> >
> > results in an early Oops during boot on my AMD machine.
> > I haven't written down the entire backtrace, but basically things start to
> > go wrong in mce_threshold_create_device() from
> > arch/x86/kernel/cpu/mcheck/mce_amd.c.
> >
> > # CONFIG_HOTPLUG_CPU is not set
> >
> > Reverting the commit "fixes" the issue for me.
>
> CCing Sebastian and Borislav.
BUG: unable to handle kernel NULL pointer dereference at 000000000000004c
RIP: kobject_get at lib/kobject.c:594
(inlined by) kobject_add_internal at lib/kobject.c:214
? kobj_to_dev at include/linux/device.h:968 (discriminator 1)
(inlined by) get_device at drivers/base/core.c:1796 (discriminator 1)
? kobject_add at lib/kobject.c:415
? kobject_create_and_add at lib/kobject.c:753
? threshold_create_bank at arch/x86/kernel/cpu/mcheck/mce_amd.c:1212
(inlined by) mce_threshold_create_device at arch/x86/kernel/cpu/mcheck/mce_amd.c:1348
The comment in arch/x86/kernel/cpu/mcheck/mce_amd.c says:
1384 * mcheck_init_device should be inited before threshold_init_device to
1385 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
--
Markus
On 12/26/2016 10:45 AM, Markus Trippelsdorf wrote:
> On 2016.12.26 at 12:06 +0100, Markus Trippelsdorf wrote:
>> On 2016.12.26 at 08:45 +0100, Markus Trippelsdorf wrote:
>>> On 2016.12.25 at 14:39 +0100, Thomas Gleixner wrote:
>>>> Linus,
>>>>
>>>> please pull the latest smp-urgent-for-linus git tree from:
>>>>
>>>> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git smp-urgent-for-linus
>>>>
>>>> Thomas Gleixner (11):
>>>> cpu/hotplug: Prevent overwriting of callbacks
>>> The following commit:
>>>
>>> commit dc280d93623927570da279e99393879dbbab39e7
>>> Author: Thomas Gleixner <[email protected]>
>>> Date: Wed Dec 21 20:19:49 2016 +0100
>>>
>>> cpu/hotplug: Prevent overwriting of callbacks
>>>
>>> results in an early Oops during boot on my AMD machine.
>>> I haven't written down the entire backtrace, but basically things start to
>>> go wrong in mce_threshold_create_device() from
>>> arch/x86/kernel/cpu/mcheck/mce_amd.c.
>>>
>>> # CONFIG_HOTPLUG_CPU is not set
>>>
>>> Reverting the commit "fixes" the issue for me.
>> CCing Sebastian and Borislav.
> BUG: unable to handle kernel NULL pointer dereference at 000000000000004c
>
> RIP: kobject_get at lib/kobject.c:594
> (inlined by) kobject_add_internal at lib/kobject.c:214
>
> ? kobj_to_dev at include/linux/device.h:968 (discriminator 1)
> (inlined by) get_device at drivers/base/core.c:1796 (discriminator 1)
>
> ? kobject_add at lib/kobject.c:415
>
> ? kobject_create_and_add at lib/kobject.c:753
>
> ? threshold_create_bank at arch/x86/kernel/cpu/mcheck/mce_amd.c:1212
> (inlined by) mce_threshold_create_device at arch/x86/kernel/cpu/mcheck/mce_amd.c:1348
>
> The comment in arch/x86/kernel/cpu/mcheck/mce_amd.c says:
>
> 1384 * mcheck_init_device should be inited before threshold_init_device to
> 1385 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
My nightly test hit this as well. AMD only, Intel passed. I haven't
verified whether the commit that Markus implicated is the one that caused
this, but it's the same BUG signature (possibly with a slightly different stack).
[ 1.554351] smpboot: CPU0: AMD Engineering Sample (family: 0x10, model: 0x4, stepping: 0x1)
...
[ 33.579949] BUG: unable to handle kernel NULL pointer dereference at 000000000000004c
[ 33.588018] IP: kobject_get+0x11/0x80
[ 33.591787] PGD 0
[ 33.591788]
[ 33.595386] Oops: 0000 [#1] SMP
[ 33.598620] Modules linked in:
[ 33.601765] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 4.10.0-rc1upstream #1
[ 33.608936] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS 080014 07/18/2008
[ 33.620136] task: ffff880216eb6d40 task.stack: ffffc90000c60000
[ 33.626235] RIP: 0010:kobject_get+0x11/0x80
[ 33.630543] RSP: 0018:ffffc90000c63c98 EFLAGS: 00010202
[ 33.635925] RAX: ffffffff81b6ba09 RBX: 0000000000000010 RCX: 0000000000000000
[ 33.643276] RDX: 0000000000000000 RSI: 000000000000002f RDI: 0000000000000010
[ 33.650627] RBP: ffffc90000c63ca8 R08: 0000000000000001 R09: 0000000000000025
[ 33.657978] R10: dead000000000200 R11: dead000000000100 R12: ffff8802164887c0
[ 33.665329] R13: 0000000000000000 R14: 000000000000d538 R15: ffff88021694c180
[ 33.672680] FS: 0000000000000000(0000) GS:ffff88021fc80000(0000) knlGS:0000000000000000
[ 33.681015] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 33.686933] CR2: 000000000000004c CR3: 0000000001e0a000 CR4: 00000000000006e0
[ 33.694284] Call Trace:
[ 33.696803] kobject_add_internal+0x40/0x2e0
[ 33.701199] ? kfree_const+0x1d/0x30
[ 33.704878] kobject_add_varg+0x38/0x60
[ 33.708829] kobject_add+0x44/0x70
[ 33.712331] kobject_create_and_add+0x3e/0x80
[ 33.716818] mce_threshold_create_device+0x128/0x380
[ 33.721931] ? __debugfs_create_file+0xe9/0x130
[ 33.726596] threshold_init_device+0x26/0x56
[ 33.730994] ? severities_debugfs_init+0x3c/0x3c
[ 33.735749] ? severities_debugfs_init+0x3c/0x3c
[ 33.740504] do_one_initcall+0x45/0x170
[ 33.744455] kernel_init_freeable+0x17b/0x214
[ 33.748941] ? kernel_init_freeable+0x214/0x214
[ 33.753606] ? rest_init+0x90/0x90
[ 33.757108] kernel_init+0x9/0x100
[ 33.760610] ret_from_fork+0x25/0x30
[ 33.764289] Code: 89 e5 e8 b3 a6 e5 ff c9 c3 90 55 48 89 e5 e8 a7 a6 e5 ff c9 c3 0f 1f 44 00 00 55 48 89 e5 53 48 89 fb 48 83 ec 08 48 85 ff 74 18 <f6> 47 3c 01 74 1c b8 01 00 00 00 f0 0f c1 43 38 83 c0 01 83 f8
[ 33.783741] RIP: kobject_get+0x11/0x80 RSP: ffffc90000c63c98
[ 33.789570] CR2: 000000000000004c
[ 33.792984] ---[ end trace 861eb820e5b8a9c8 ]---
[ 33.797737] Kernel panic - not syncing: Fatal exception
[ 33.803132] Kernel Offset: disabled
[ 33.806722] ---[ end Kernel panic - not syncing: Fatal exception
On Mon, 26 Dec 2016, Boris Ostrovsky wrote:
> On 12/26/2016 10:45 AM, Markus Trippelsdorf wrote:
> > The comment in arch/x86/kernel/cpu/mcheck/mce_amd.c says:
> >
> > 1384 * mcheck_init_device should be inited before threshold_init_device to
> > 1385 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
That's not changed by this commit.
> My nightly test hit this as well. AMD only, Intel passed. I haven't
> verified whether the commit that Markus implicated is the one that caused
> this, but it's the same BUG signature (possibly with a slightly different stack)
>
> [ 1.554351] smpboot: CPU0: AMD Engineering Sample (family: 0x10, model: 0x4, stepping: 0x1)
> ...
Is there any interesting error message before the BUG hits? I'll try
to reproduce on an AMD box tomorrow.
Thanks,
tglx
On 12/26/2016 01:21 PM, Thomas Gleixner wrote:
> On Mon, 26 Dec 2016, Boris Ostrovsky wrote:
>> On 12/26/2016 10:45 AM, Markus Trippelsdorf wrote:
>>> The comment in arch/x86/kernel/cpu/mcheck/mce_amd.c says:
>>>
>>> 1384 * mcheck_init_device should be inited before threshold_init_device to
>>> 1385 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
>
> That's not changed by this commit.
>
>> My nightly test hit this as well. AMD only, Intel passed. I haven't
>> verified whether the commit that Markus implicated is the one that caused
>> this, but it's the same BUG signature (possibly with a slightly different stack)
>>
>> [ 1.554351] smpboot: CPU0: AMD Engineering Sample (family: 0x10, model: 0x4, stepping: 0x1)
>> ...
>
> Is there any interesting error message before the BUG hits? I'll try
> to reproduce on an AMD box tomorrow.
Nothing that caught my eye. I can post it tomorrow if you are still
interested.
-boris
On Mon, Dec 26, 2016 at 07:21:44PM +0100, Thomas Gleixner wrote:
> Is there any interesting error message before the BUG hits? I'll try
> to reproduce on an AMD box tomorrow.
Hmm, so lemme see if I see it correctly:
threshold_create_bank() does kobject_create_and_add(name, &dev->kobj);
and that dev thing is
struct device *dev = per_cpu(mce_device, cpu);
BUT(!), those mce_device per-CPU things get initialized in
mce_cpu_online()
|-> mce_device_create(cpu);
With a CONFIG_HOTPLUG_CPU=n .config that doesn't happen, right?
Oh, and I see what could've changed that:
8c0eeac819c8 ("x86/mcheck: Move CPU_ONLINE and CPU_DOWN_PREPARE to hotplug state machine")
And before that, we did call mce_device_create(cpu) in
mcheck_init_device() which is a device initcall and not dependent on CPU
hotplug.
And frankly, flipping back to the for_each_online_cpu(i) is yucky as
hell but I don't see any other/better solution besides pulling up
mce_device_create() into mcheck_init_device()...
Hmmm.
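
A condensed sketch of that path, for readers without the tree at hand -- the
function and variable names are the real ones from mce.c/mce_amd.c, but the
bodies are trimmed to the relevant lines and are not verbatim kernel code:

/* mce.c: the per-CPU device only gets created from the hotplug callback. */
DEFINE_PER_CPU(struct device *, mce_device);

static int mce_cpu_online(unsigned int cpu)
{
	return mce_device_create(cpu);		/* populates mce_device for @cpu */
}

/* mce_amd.c: runs later, from the threshold_init_device() initcall. */
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	const char *name = get_name(bank, NULL);

	/*
	 * If mce_cpu_online() never ran for this CPU, dev is NULL and
	 * &dev->kobj is the bogus pointer that kobject_get() faults on.
	 */
	if (!kobject_create_and_add(name, &dev->kobj))
		return -EINVAL;
	return 0;
}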
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.
--
On Mon, 26 Dec 2016, Borislav Petkov wrote:
> On Mon, Dec 26, 2016 at 07:21:44PM +0100, Thomas Gleixner wrote:
> > Is there any interesting error message before the BUG hits? I'll try
> > to reproduce on an AMD box tomorrow.
>
> Hmm, so lemme see if I see it correctly:
>
> threshold_create_bank() does kobject_create_and_add(name, &dev->kobj);
> and that dev thing is
>
> struct device *dev = per_cpu(mce_device, cpu);
>
> BUT(!), those mce_device per-CPU things get initialized in
>
> mce_cpu_online()
> |-> mce_device_create(cpu);
>
> With a CONFIG_HOTPLUG_CPU=n .config that doesn't happen, right?
>
> Oh, and I see what could've changed that:
>
> 8c0eeac819c8 ("x86/mcheck: Move CPU_ONLINE and CPU_DOWN_PREPARE to hotplug state machine")
>
> And before that, we did call mce_device_create(cpu) in
> mcheck_init_device() which is a device initcall and not dependent on CPU
> hotplug.
>
> And frankly, flipping back to the for_each_online_cpu(i) is yucky as
> hell but I don't see any other/better solution besides pulling up
> mce_device_create() into mcheck_init_device()...
The hotplug callbacks are invoked even with HOTPLUG=n. So that's not the
problem. I can reproduce it. Will post info once I understand it.
Thanks,
tglx
On Mon, 26 Dec 2016, Thomas Gleixner wrote:
> On Mon, 26 Dec 2016, Borislav Petkov wrote:
> > On Mon, Dec 26, 2016 at 07:21:44PM +0100, Thomas Gleixner wrote:
> > > Is there any interesting error message before the BUG hits? I'll try
> > > to reproduce on an AMD box tomorrow.
> >
> > Hmm, so lemme see if I see it correctly:
> >
> > threshold_create_bank() does kobject_create_and_add(name, &dev->kobj);
> > and that dev thing is
> >
> > struct device *dev = per_cpu(mce_device, cpu);
> >
> > BUT(!), those mce_device per-CPU things get initialized in
> >
> > mce_cpu_online()
> > |-> mce_device_create(cpu);
> >
> > With a CONFIG_HOTPLUG_CPU=n .config that doesn't happen, right?
> >
> > Oh, and I see what could've changed that:
> >
> > 8c0eeac819c8 ("x86/mcheck: Move CPU_ONLINE and CPU_DOWN_PREPARE to hotplug state machine")
> >
> > And before that, we did call mce_device_create(cpu) in
> > mcheck_init_device() which is a device initcall and not dependent on CPU
> > hotplug.
> >
> > And frankly, flipping back to the for_each_online_cpu(i) is yucky as
> > hell but I don't see any other/better solution besides pulling up
> > mce_device_create() into mcheck_init_device()...
>
> The hotplug callbacks are invoked even with HOTPLUG=n. So that's not the
> problem. I can reproduce it. Will post info once I understand it.
So the issue is indeed in that commit. I'm a moron: cpuhp_store_callbacks()
now hands back the dynamically allocated state, so the error check in
__cpuhp_setup_state() treats that positive return value as a failure and
skips invoking the startup callbacks. mce_device_create() therefore never
runs for the already online CPUs.
But the AMD MCE code should be made more solid, because exactly that issue
can happen when something goes wrong in mcheck_init_device(). If that
happens, the device pointer is NULL and this code crashes. Adding the
NULL pointer check makes the machine survive despite the wreckage in the
hotplug code.
Fix below.
Thanks,
tglx
8<---------------------------
arch/x86/kernel/cpu/mcheck/mce_amd.c | 3 +++
kernel/cpu.c | 9 ++++++++-
2 files changed, 11 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigne
const char *name = get_name(bank, NULL);
int err = 0;
+ if (!dev)
+ return -ENODEV;
+
if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu));
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1471,6 +1471,7 @@ int __cpuhp_setup_state(enum cpuhp_state
bool multi_instance)
{
int cpu, ret = 0;
+ bool dynstate;
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
@@ -1480,6 +1481,12 @@ int __cpuhp_setup_state(enum cpuhp_state
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
+ dynstate = state == CPUHP_AP_ONLINE_DYN;
+ if (ret > 0 && dynstate) {
+ state = ret;
+ ret = 0;
+ }
+
if (ret || !invoke || !startup)
goto out;
@@ -1508,7 +1515,7 @@ int __cpuhp_setup_state(enum cpuhp_state
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
*/
- if (!ret && state == CPUHP_AP_ONLINE_DYN)
+ if (!ret && dynstate)
return state;
return ret;
}
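
For completeness, this is roughly the registration in mcheck_init_device() whose
skipped invocation triggered the crash (a sketch from memory -- the state label,
callback names and error handling are approximate, not copied from the tree). With
the fix above, cpuhp_setup_state() once again runs the online callback for every
already booted CPU before returning the allocated dynamic state:

static int __init mcheck_init_device(void)
{
	int err;

	/*
	 * invoke == true (the non-_nocalls variant): the startup callback
	 * has to run for every CPU that is already online before this
	 * returns. The broken commit made cpuhp_store_callbacks() return
	 * the reserved dynamic state, so the "if (ret || ...)" check
	 * skipped that invocation and mce_device stayed NULL for all boot
	 * CPUs, which the later threshold_init_device() initcall then
	 * dereferenced.
	 */
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
				mce_cpu_online, mce_cpu_pre_down);
	if (err < 0)
		return err;

	return 0;
}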