2022-10-11 20:38:26

by Alexey Dobriyan

Subject: [PATCH 1/2 -tip] x86: add and use cpu_is_intel() wrapper

I realised that writing

boot_cpu_data.x86_vendor == X86_VENDOR_INTEL

is simply too much to type every time.
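
For reference, here is the helper being added plus a typical call-site
conversion, both lifted from the diff below (the hunk against
arch/x86/include/asm/processor.h is the authoritative definition):

	static inline bool cpu_is_intel(void)
	{
		/* true iff the boot CPU reports the Intel vendor id */
		return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	}

	/* before */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return false;

	/* after */
	if (!cpu_is_intel())
		return false;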

Signed-off-by: Alexey Dobriyan <[email protected]>
---

arch/x86/crypto/blowfish_glue.c | 2 +-
arch/x86/crypto/camellia_glue.c | 2 +-
arch/x86/crypto/des3_ede_glue.c | 2 +-
arch/x86/crypto/twofish_glue_3way.c | 2 +-
arch/x86/events/msr.c | 2 +-
arch/x86/include/asm/processor.h | 5 +++++
arch/x86/kernel/apic/apic.c | 2 +-
arch/x86/kernel/apic/io_apic.c | 4 ++--
arch/x86/kernel/cpu/aperfmperf.c | 2 +-
arch/x86/kernel/cpu/bugs.c | 6 +++---
arch/x86/kernel/cpu/common.c | 2 +-
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/cpu/mce/core.c | 6 +++---
arch/x86/kernel/cpu/mce/intel.c | 2 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 2 +-
arch/x86/kernel/cpu/resctrl/core.c | 6 +++---
arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +-
arch/x86/kernel/hpet.c | 2 +-
arch/x86/kernel/smpboot.c | 2 +-
arch/x86/kernel/tsc.c | 6 +++---
arch/x86/kvm/pmu.h | 4 +---
arch/x86/mm/cpu_entry_area.c | 2 +-
arch/x86/xen/pmu.c | 6 +++---
drivers/acpi/processor_idle.c | 2 +-
drivers/cpufreq/cpufreq_ondemand.c | 2 +-
drivers/cpufreq/intel_pstate.c | 2 +-
drivers/ras/cec.c | 2 +-
drivers/rtc/rtc-cmos.c | 2 +-
drivers/staging/media/atomisp/include/linux/atomisp_platform.h | 4 ++--
29 files changed, 45 insertions(+), 42 deletions(-)

--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -284,7 +284,7 @@ static struct skcipher_alg bf_skcipher_algs[] = {

static bool is_blacklisted_cpu(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return false;

if (boot_cpu_data.x86 == 0x0f) {
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1357,7 +1357,7 @@ static struct skcipher_alg camellia_skcipher_algs[] = {

static bool is_blacklisted_cpu(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return false;

if (boot_cpu_data.x86 == 0x0f) {
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -335,7 +335,7 @@ static struct skcipher_alg des3_ede_skciphers[] = {

static bool is_blacklisted_cpu(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return false;

if (boot_cpu_data.x86 == 0x0f) {
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -104,7 +104,7 @@ static struct skcipher_alg tf_skciphers[] = {

static bool is_blacklisted_cpu(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return false;

if (boot_cpu_data.x86 == 0x06 &&
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -39,7 +39,7 @@ static bool test_therm_status(int idx, void *data)

static bool test_intel(int idx, void *data)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ if (!cpu_is_intel() ||
boot_cpu_data.x86 != 6)
return false;

--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -177,6 +177,11 @@ enum cpuid_regs_idx {
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

+static inline bool cpu_is_intel(void)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+}
+
extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];

--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1390,7 +1390,7 @@ void __init init_bsp_APIC(void)

#ifdef CONFIG_X86_32
/* This bit is reserved on P4/Xeon and should be cleared */
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ if (cpu_is_intel() &&
(boot_cpu_data.x86 == 15))
value &= ~APIC_SPIV_FOCUS_DISABLED;
else
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1567,7 +1567,7 @@ void __init setup_ioapic_ids_from_mpc(void)
* Don't check I/O APIC IDs for xAPIC systems. They have
* no meaning without the serial APIC bus.
*/
- if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (!cpu_is_intel()
|| APIC_XAPIC(boot_cpu_apic_version))
return;
setup_ioapic_ids_from_mpc_nocheck();
@@ -2571,7 +2571,7 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)

static u8 io_apic_unique_id(int idx, u8 id)
{
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ if (cpu_is_intel() &&
!APIC_XAPIC(boot_cpu_apic_version))
return io_apic_get_unique_id(idx, id);
else
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -321,7 +321,7 @@ void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)

static void __init bp_init_freq_invariance(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return;

if (intel_set_max_freq_ratio())
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -928,7 +928,7 @@ static void __init retbleed_select_mitigation(void)
* Let IBRS trump all on Intel without affecting the effects of the
* retbleed= cmdline option.
*/
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ if (cpu_is_intel()) {
switch (spectre_v2_enabled) {
case SPECTRE_V2_IBRS:
retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
@@ -1290,7 +1290,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}

- if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ if (cmd == SPECTRE_V2_CMD_IBRS && !cpu_is_intel()) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
@@ -1414,7 +1414,7 @@ static void __init spectre_v2_select_mitigation(void)
boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ cpu_is_intel()) {
mode = SPECTRE_V2_IBRS;
break;
}
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2021,7 +2021,7 @@ static void wrmsrl_cstar(unsigned long val)
* is so far ignored by the CPU, but raises a #VE trap in a TDX
* guest. Avoid the pointless write on all Intel CPUs.
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
wrmsrl(MSR_CSTAR, val);
}

--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -384,7 +384,7 @@ static void bsp_init_intel(struct cpuinfo_x86 *c)
int ppro_with_ram_bug(void)
{
/* Uses data from early_cpu_detect now */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ if (cpu_is_intel() &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_stepping < 8) {
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -471,7 +471,7 @@ int mce_usable_address(struct mce *m)
return 0;

/* Checks after this one are Intel/Zhaoxin-specific: */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+ if (!cpu_is_intel() &&
boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 1;

@@ -2066,7 +2066,7 @@ bool filter_mce(struct mce *m)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return amd_filter_mce(m);
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (cpu_is_intel())
return intel_filter_mce(m);

return false;
@@ -2309,7 +2309,7 @@ static void vendor_disable_error_reporting(void)
* the socket like the last level cache (LLC), the integrated memory
* controller (iMC), etc.
*/
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+ if (cpu_is_intel() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -85,7 +85,7 @@ static int cmci_supported(int *banks)
* initialization is vendor keyed and this
* makes sure none of the backdoors are entered otherwise.
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+ if (!cpu_is_intel() &&
boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 0;

--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -706,7 +706,7 @@ void __init mtrr_bp_init(void)
if (cpuid_eax(0x80000000) >= 0x80000008) {
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ if (cpu_is_intel() &&
boot_cpu_data.x86 == 0xF &&
boot_cpu_data.x86_model == 0x3 &&
(boot_cpu_data.x86_stepping == 0x3 ||
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -732,7 +732,7 @@ static __init bool get_mem_config(void)
if (!rdt_cpu_has(X86_FEATURE_MBA))
return false;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (cpu_is_intel())
return __get_mem_config_intel(&hw_res->r_resctrl);
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
@@ -811,7 +811,7 @@ static __init void __check_quirks_intel(void)

static __init void check_quirks(void)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (cpu_is_intel())
__check_quirks_intel();
}

@@ -865,7 +865,7 @@ static __init void rdt_init_res_defs_amd(void)

static __init void rdt_init_res_defs(void)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (cpu_is_intel())
rdt_init_res_defs_intel();
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
rdt_init_res_defs_amd();
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -70,7 +70,7 @@ static struct class *pseudo_lock_class;
*/
static u64 get_prefetch_disable_bits(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ if (!cpu_is_intel() ||
boot_cpu_data.x86 != 6)
return 0;

--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -921,7 +921,7 @@ static bool __init mwait_pc10_supported(void)
{
unsigned int eax, ebx, ecx, mwait_substates;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return false;

if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -777,7 +777,7 @@ static void __init smp_quirk_init_udelay(void)
return;

/* if modern processor, use no delay */
- if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+ if ((cpu_is_intel() && (boot_cpu_data.x86 == 6)) ||
((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
init_udelay = 0;
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -624,7 +624,7 @@ unsigned long native_calibrate_tsc(void)
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
unsigned int crystal_khz;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return 0;

if (boot_cpu_data.cpuid_level < 0x15)
@@ -697,7 +697,7 @@ static unsigned long cpu_khz_from_cpuid(void)
{
unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return 0;

if (boot_cpu_data.cpuid_level < 0x16)
@@ -1244,7 +1244,7 @@ int unsynchronized_tsc(void)
* Intel systems are normally all synchronized.
* Exceptions must mark TSC as unstable:
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ if (!cpu_is_intel()) {
/* assume multi socket systems are not synchronized: */
if (num_possible_cpus() > 1)
return 1;
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -162,15 +162,13 @@ extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(void)
{
- bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
-
perf_get_x86_pmu_capability(&kvm_pmu_cap);

/*
* For Intel, only support guest architectural pmu
* on a host with architectural pmu.
*/
- if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
+ if ((cpu_is_intel() && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
enable_pmu = false;

if (!enable_pmu) {
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -63,7 +63,7 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
unsigned int npages;
void *cea;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return;

cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -292,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)

bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ if (!cpu_is_intel()) {
if (is_amd_pmu_msr(msr)) {
if (!xen_amd_pmu_emulate(msr, val, 1))
*val = native_read_msr_safe(msr, err);
@@ -315,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
uint64_t val = ((uint64_t)high << 32) | low;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ if (!cpu_is_intel()) {
if (is_amd_pmu_msr(msr)) {
if (!xen_amd_pmu_emulate(msr, &val, 0))
*err = native_write_msr_safe(msr, low, high);
@@ -386,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)

unsigned long long xen_read_pmc(int counter)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return xen_amd_read_pmc(counter);
else
return xen_intel_read_pmc(counter);
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -537,7 +537,7 @@ static void wait_for_freeze(void)
* are ancient and may need the dummy wait. This also assumes
* that the motivating chipset issue was Intel-only.
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return;
#endif
/*
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -45,7 +45,7 @@ static int should_io_be_busy(void)
/*
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ if (cpu_is_intel() &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model >= 15)
return 1;
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3418,7 +3418,7 @@ static int __init intel_pstate_init(void)
const struct x86_cpu_id *id;
int rc;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return -ENODEV;

id = x86_match_cpu(hwp_support_ids);
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -561,7 +561,7 @@ static int __init cec_init(void)
* if pages with corrected errors are aggressively
* taken offline.
*/
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (cpu_is_intel())
action_threshold = 2;

ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1257,7 +1257,7 @@ static void rtc_wake_off(struct device *dev)
/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
static void use_acpi_alarm_quirks(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (!cpu_is_intel())
return;

if (!is_hpet_enabled())
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -238,10 +238,10 @@ const struct atomisp_platform_data *atomisp_get_platform_data(void);
const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);

/* API from old platform_camera.h, new CPUID implementation */
-#define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \
+#define __IS_SOC(x) (cpu_is_intel() && \
boot_cpu_data.x86 == 6 && \
boot_cpu_data.x86_model == (x))
-#define __IS_SOCS(x,y) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \
+#define __IS_SOCS(x,y) (cpu_is_intel() && \
boot_cpu_data.x86 == 6 && \
(boot_cpu_data.x86_model == (x) || \
boot_cpu_data.x86_model == (y)))


2022-10-11 20:38:26

by Alexey Dobriyan

Subject: [PATCH 2/2 -tip] x86: add and use cpu_is_amd() wrapper

This is a small, easy-to-remember function name.

Don't bother with 32-bit CPU vendors.
If someone wants to add Hygon, be my guest.
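
For reference, the new helper mirrors cpu_is_intel(); a Hygon variant,
if someone does add one, would presumably look the same. cpu_is_hygon()
below is only a sketch of that suggestion and is not part of this patch:

	static inline bool cpu_is_amd(void)
	{
		/* true iff the boot CPU reports the AMD vendor id */
		return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
	}

	/* hypothetical follow-up, NOT included in this patch */
	static inline bool cpu_is_hygon(void)
	{
		return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON;
	}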

Signed-off-by: Alexey Dobriyan <[email protected]>
---

arch/x86/events/amd/uncore.c | 2 +-
arch/x86/include/asm/acpi.h | 2 +-
arch/x86/include/asm/amd_nb.h | 2 +-
arch/x86/include/asm/geode.h | 2 +-
arch/x86/include/asm/processor.h | 5 +++++
arch/x86/include/asm/virtext.h | 2 +-
arch/x86/kernel/acpi/cppc.c | 2 +-
arch/x86/kernel/amd_nb.c | 4 ++--
arch/x86/kernel/apic/apic.c | 4 ++--
arch/x86/kernel/cpu/bugs.c | 8 ++++----
arch/x86/kernel/cpu/cacheinfo.c | 2 +-
arch/x86/kernel/cpu/mce/core.c | 4 ++--
arch/x86/kernel/cpu/mce/dev-mcelog.c | 2 +-
arch/x86/kernel/cpu/mce/severity.c | 2 +-
arch/x86/kernel/cpu/mtrr/cleanup.c | 2 +-
arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
arch/x86/kernel/cpu/resctrl/core.c | 4 ++--
arch/x86/kernel/smpboot.c | 4 ++--
arch/x86/mm/fault.c | 2 +-
arch/x86/pci/amd_bus.c | 2 +-
arch/x86/xen/pmu.c | 2 +-
drivers/acpi/processor_perflib.c | 2 +-
drivers/acpi/x86/s2idle.c | 2 +-
drivers/cpufreq/acpi-cpufreq.c | 6 +++---
drivers/cpufreq/amd-pstate.c | 2 +-
drivers/cpufreq/amd_freq_sensitivity.c | 2 +-
drivers/infiniband/hw/qib/qib_wc_x86_64.c | 2 +-
drivers/mtd/nand/raw/cs553x_nand.c | 2 +-
drivers/platform/x86/amd/hsmp.c | 2 +-
drivers/platform/x86/thinkpad_acpi.c | 2 +-
drivers/rtc/rtc-mc146818-lib.c | 2 +-
31 files changed, 45 insertions(+), 40 deletions(-)

--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -649,7 +649,7 @@ static int __init amd_uncore_init(void)
union cpuid_0x80000022_ebx ebx;
int ret = -ENODEV;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;

--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -75,7 +75,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
* Steppings 0x0A and later are good
*/
if (boot_cpu_data.x86 == 0x0F &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ cpu_is_amd() &&
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_stepping < 0x0A)
return 1;
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -101,7 +101,7 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)

static inline bool amd_gart_present(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!cpu_is_amd())
return false;

/* GART present only on Fam15h, upto model 0fh */
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -20,7 +20,7 @@ static inline int is_geode_gx(void)

static inline int is_geode_lx(void)
{
- return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ return (cpu_is_amd() &&
(boot_cpu_data.x86 == 5) &&
(boot_cpu_data.x86_model == 10));
}
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -182,6 +182,11 @@ static inline bool cpu_is_intel(void)
return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
}

+static inline bool cpu_is_amd(void)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+}
+
extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];

--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -94,7 +94,7 @@ static inline void cpu_emergency_vmxoff(void)
*/
static inline int cpu_has_svm(const char **msg)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
if (msg)
*msg = "not amd or hygon";
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -107,7 +107,7 @@ void init_freq_invariance_cppc(void)
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!cpu_is_amd())
return;

mutex_lock(&freq_invariance_lock);
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -314,7 +314,7 @@ bool __init early_is_amd_nb(u32 device)
const struct pci_device_id *id;
u32 vendor = device & 0xffff;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return false;

@@ -334,7 +334,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
u64 base, msr;
unsigned int segn_busn_bits;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return NULL;

--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -227,7 +227,7 @@ static inline int lapic_is_integrated(void)
static int modern_apic(void)
{
/* AMD systems use old APIC versions, so check the CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ if (cpu_is_amd() &&
boot_cpu_data.x86 >= 0xf)
return 1;

@@ -1279,7 +1279,7 @@ void __init sync_Arb_IDs(void)
* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
* needed on AMD.
*/
- if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (modern_apic() || cpu_is_amd())
return;

/*
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -882,7 +882,7 @@ static void __init retbleed_select_mitigation(void)
do_cmd_auto:
case RETBLEED_CMD_AUTO:
default:
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ if (cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
@@ -904,7 +904,7 @@ static void __init retbleed_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
pr_err(RETBLEED_UNTRAIN_MSG);

@@ -1551,7 +1551,7 @@ static void __init spectre_v2_select_mitigation(void)
*/
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
boot_cpu_has(X86_FEATURE_IBPB) &&
- (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ (cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

if (retbleed_cmd != RETBLEED_CMD_IBPB) {
@@ -2369,7 +2369,7 @@ static ssize_t retbleed_show_state(char *buf)
{
if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -603,7 +603,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
union _cpuid4_leaf_ecx ecx;
unsigned edx;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (cpu_is_amd()) {
if (boot_cpu_has(X86_FEATURE_TOPOEXT))
cpuid_count(0x8000001d, index, &eax.full,
&ebx.full, &ecx.full, &edx);
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -2064,7 +2064,7 @@ static void __mcheck_cpu_init_timer(void)

bool filter_mce(struct mce *m)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (cpu_is_amd())
return amd_filter_mce(m);
if (cpu_is_intel())
return intel_filter_mce(m);
@@ -2311,7 +2311,7 @@ static void vendor_disable_error_reporting(void)
*/
if (cpu_is_intel() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
return;

--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -67,7 +67,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
unlock:
mutex_unlock(&mce_chrdev_read_mutex);

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!cpu_is_amd())
mce->kflags |= MCE_HANDLED_MCELOG;

return NOTIFY_OK;
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -400,7 +400,7 @@ static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char

int noinstr mce_severity(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ if (cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return mce_severity_amd(m, regs, msg, is_excp);
else
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -830,7 +830,7 @@ int __init amd_special_default_mtrr(void)
{
u32 l, h;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return 0;
if (boot_cpu_data.x86 < 0xf)
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -49,7 +49,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
{
u32 lo, hi;

- if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ if (!(cpu_is_amd() &&
(boot_cpu_data.x86 >= 0x0f)))
return;

--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -734,7 +734,7 @@ static __init bool get_mem_config(void)

if (cpu_is_intel())
return __get_mem_config_intel(&hw_res->r_resctrl);
- else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ else if (cpu_is_amd())
return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

return false;
@@ -867,7 +867,7 @@ static __init void rdt_init_res_defs(void)
{
if (cpu_is_intel())
rdt_init_res_defs_intel();
- else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ else if (cpu_is_amd())
rdt_init_res_defs_amd();
}

--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -779,7 +779,7 @@ static void __init smp_quirk_init_udelay(void)
/* if modern processor, use no delay */
if ((cpu_is_intel() && (boot_cpu_data.x86 == 6)) ||
((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
+ (cpu_is_amd() && (boot_cpu_data.x86 >= 0xF))) {
init_udelay = 0;
return;
}
@@ -1752,7 +1752,7 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ if (cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -438,7 +438,7 @@ static void dump_pagetable(unsigned long address)
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
+ if (!cpu_is_amd()
|| boot_cpu_data.x86 != 0xf)
return 0;

--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -391,7 +391,7 @@ static int __init pci_io_ecs_init(void)

static int __init amd_postcore_init(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ if (!cpu_is_amd() &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return 0;

--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -69,7 +69,7 @@ static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;

static void xen_pmu_arch_init(void)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (cpu_is_amd()) {

switch (boot_cpu_data.x86) {
case 0x15:
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -257,7 +257,7 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
u32 hi, lo, fid, did;
int index = px->control & 0x00000007;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!cpu_is_amd())
return;

if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -341,7 +341,7 @@ static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, g

static bool acpi_s2idle_vendor_amd(void)
{
- return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+ return cpu_is_amd();
}

static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -217,7 +217,7 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
struct cpufreq_frequency_table *pos;
struct acpi_processor_performance *perf;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (cpu_is_amd())
msr &= AMD_MSR_RANGE;
else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
msr &= HYGON_MSR_RANGE;
@@ -651,7 +651,7 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}

- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (cpu_is_amd())
highest_perf = amd_get_highest_perf();
else
highest_perf = perf_caps.highest_perf;
@@ -763,7 +763,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)

switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ if (cpu_is_amd() &&
boot_cpu_data.x86 == 0xf) {
pr_debug("AMD K8 systems must use native drivers.\n");
result = -ENODEV;
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -614,7 +614,7 @@ static int __init amd_pstate_init(void)
{
int ret;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!cpu_is_amd())
return -ENODEV;

if (!acpi_cpc_valid()) {
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -112,7 +112,7 @@ static int __init amd_freq_sensitivity_init(void)
struct pci_dev *pcidev;
unsigned int pci_vendor;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (cpu_is_amd())
pci_vendor = PCI_VENDOR_ID_AMD;
else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
pci_vendor = PCI_VENDOR_ID_HYGON;
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -146,5 +146,5 @@ void qib_disable_wc(struct qib_devdata *dd)
*/
int qib_unordered_wc(void)
{
- return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+ return !cpu_is_amd();
}
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -326,7 +326,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
static int is_geode(void)
{
/* These are the CPUs which will have a CS553[56] companion chip */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ if (cpu_is_amd() &&
boot_cpu_data.x86 == 5 &&
boot_cpu_data.x86_model == 10)
return 1; /* Geode LX */
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -363,7 +363,7 @@ static int __init hsmp_plt_init(void)
u16 num_sockets;
int i;

- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
+ if (!cpu_is_amd() || boot_cpu_data.x86 < 0x19) {
pr_err("HSMP is not supported on Family:%x model:%x\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
return ret;
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10567,7 +10567,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
/* Support for this only works on AMD platforms */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+ if (!cpu_is_amd()) {
dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
return -ENODEV;
}
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(mc146818_get_time);
static bool apply_amd_register_a_behavior(void)
{
#ifdef CONFIG_X86
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ if (cpu_is_amd() ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return true;
#endif