2024-01-30 05:59:46

by Yuan, Perry

Subject: [PATCH Resend 1/8] tools/power x86_energy_perf_policy: add info show support for AMD Pstate EPP driver

From: Perry Yuan <[email protected]>

With the amd_pstate EPP driver implemented, the x86_energy_perf_policy
utility needs to be extended to display hardware energy and performance
policy hint information on AMD processors.

Signed-off-by: Perry Yuan <[email protected]>
---
.../x86_energy_perf_policy.c | 211 ++++++++++++++----
1 file changed, 167 insertions(+), 44 deletions(-)

diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 5fd9e594079c..5daf1c2bb601 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -76,6 +76,8 @@ unsigned long long pkg_selected_set;
cpu_set_t *cpu_present_set;
cpu_set_t *cpu_selected_set;
int genuine_intel;
+unsigned int authentic_amd;
+unsigned int max_level;

size_t cpu_setsize;

@@ -724,6 +726,53 @@ int put_msr(int cpu, int offset, unsigned long long new_msr)
return 0;
}

+static int amd_put_msr(int cpu, off_t offset, unsigned long msr)
+{
+ ssize_t retval;
+ int fd;
+ char pathname[32];
+
+ sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+ fd = open(pathname, O_RDWR);
+ if (fd < 0) {
+ err(-EACCES, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+ goto out;
+ }
+ retval = pwrite(fd, &msr, sizeof(msr), offset);
+ if (retval != sizeof(msr))
+ err(-EFAULT, "cpu%d: msr offset 0x%lx write failed ret = %ld fd = %d", cpu, (unsigned long)offset, retval, fd);
+
+ if (debug > 1)
+ fprintf(stderr, "amd_put_msr(cpu%d, 0x%lx, 0x%lX)\n", cpu, offset, msr);
+
+ close(fd);
+
+out:
+ return (retval == sizeof(msr)) ? 0 : -1;
+}
+
+
+static int amd_get_msr(int cpu, off_t offset, unsigned long *msr)
+{
+ ssize_t retval;
+ char pathname[32];
+ int fd;
+
+ sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+ fd = open(pathname, O_RDONLY);
+ if (fd < 0) {
+ err(-EACCES, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+ goto out;
+ }
+ retval = pread(fd, msr, sizeof(*msr), offset);
+ if (retval != sizeof *msr)
+ err(-EFAULT, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
+
+ close(fd);
+out:
+ return (retval == sizeof *msr) ? 0 : -1;
+}
+
static unsigned int read_sysfs(const char *path, char *buf, size_t buflen)
{
ssize_t numread;
@@ -777,13 +826,21 @@ void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
{
unsigned long long msr;
+ int ret;

- get_msr(cpu, msr_offset, &msr);
-
- cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
- cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
- cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
- cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
+ if (genuine_intel) {
+ get_msr(cpu, msr_offset, &msr);
+ cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
+ cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
+ cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
+ cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
+ } else if (authentic_amd) {
+ ret = amd_get_msr(cpu, msr_offset, (unsigned long *)(&msr));
+ if (ret < 0)
+ errx(-1, "failed to get msr with return %d", ret);
+ cap->highest = msr_perf_2_ratio(AMD_CPPC_HIGHEST_PERF(msr));
+ cap->lowest = msr_perf_2_ratio(AMD_CPPC_LOWEST_PERF(msr));
+ }
}

void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
@@ -812,15 +869,27 @@ void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr;
+ int ret;

- get_msr(cpu, msr_offset, &msr);
-
- hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff));
- hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff));
- hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff));
- hwp_req->hwp_epp = (((msr) >> 24) & 0xff);
- hwp_req->hwp_window = (((msr) >> 32) & 0x3ff);
- hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
+ if (genuine_intel) {
+ get_msr(cpu, msr_offset, &msr);
+
+ hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff));
+ hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff));
+ hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff));
+ hwp_req->hwp_epp = (((msr) >> 24) & 0xff);
+ hwp_req->hwp_window = (((msr) >> 32) & 0x3ff);
+ hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
+ } else if (authentic_amd) {
+ ret = amd_get_msr(cpu, msr_offset, (unsigned long *)(&msr));
+ if (ret < 0)
+ errx(-1, "failed to get msr with return %d", ret);
+ hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 8) & 0xff));
+ hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 0) & 0xff));
+
+ hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff));
+ hwp_req->hwp_epp = (((msr) >> 24) & 0xff);
+ }
}

void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
@@ -895,18 +964,28 @@ int print_cpu_msrs(int cpu)
struct msr_hwp_cap cap;
int epb;

- epb = get_epb(cpu);
- if (epb >= 0)
- printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
+ if (genuine_intel) {
+ epb = get_epb(cpu);
+ if (epb >= 0)
+ printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
+ }

if (!has_hwp)
return 0;

- read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
- print_hwp_request(cpu, &req, "");
+ if (genuine_intel) {
+ read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
+ print_hwp_request(cpu, &req, "");

- read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
- print_hwp_cap(cpu, &cap, "");
+ read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+ print_hwp_cap(cpu, &cap, "");
+ } else if (authentic_amd) {
+ read_hwp_request(cpu, &req, MSR_AMD_CPPC_REQ); /* MSR_HWP_REQUEST */
+ print_hwp_request(cpu, &req, "");
+
+ read_hwp_cap(cpu, &cap, MSR_AMD_CPPC_CAP1); /* MSR_HWP_CAPABILITIES */
+ print_hwp_cap(cpu, &cap, "");
+ }

return 0;
}
@@ -1330,12 +1409,19 @@ void init_data_structures(void)
void verify_hwp_is_enabled(void)
{
unsigned long long msr;
+ int ret;

if (!has_hwp) /* set in early_cpuid() */
return;

/* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
- get_msr(base_cpu, MSR_PM_ENABLE, &msr);
+ if (genuine_intel)
+ get_msr(base_cpu, MSR_PM_ENABLE, &msr);
+ else if (authentic_amd) {
+ ret = amd_get_msr(base_cpu, MSR_AMD_CPPC_ENABLE, (unsigned long *)(&msr));
+ if (ret < 0)
+ errx(-1, "failed to get msr with return %d", ret);
+ }
if ((msr & 1) == 0) {
fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
has_hwp = 0;
@@ -1398,6 +1484,17 @@ static void get_cpuid_or_exit(unsigned int leaf,
errx(1, "Processor not supported\n");
}

+static void amd_get_cpuid_or_exit(unsigned int leaf,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ unsigned int leaf_index;
+
+ leaf_index = leaf | 0x80000000;
+ if (!__get_cpuid(leaf_index, eax, ebx, ecx, edx))
+ errx(1, "Processor not supported\n");
+}
+
/*
* early_cpuid()
* initialize turbo_is_enabled, has_hwp, has_epb
@@ -1408,24 +1505,39 @@ void early_cpuid(void)
unsigned int eax, ebx, ecx, edx;
unsigned int fms, family, model;

- get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
- family = (fms >> 8) & 0xf;
- model = (fms >> 4) & 0xf;
- if (family == 6 || family == 0xf)
- model += ((fms >> 16) & 0xf) << 4;
+ eax = ebx = ecx = edx = 0;
+ __cpuid(0, max_level, ebx, ecx, edx);
+ if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
+ genuine_intel = 1;
+ else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+ authentic_amd = 1;

- if (model == 0x4F) {
- unsigned long long msr;
+ if (genuine_intel) {
+ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
+ family = (fms >> 8) & 0xf;
+ model = (fms >> 4) & 0xf;
+ if (family == 6 || family == 0xf)
+ model += ((fms >> 16) & 0xf) << 4;

- get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
+ if (model == 0x4F) {
+ unsigned long long msr;

- bdx_highest_ratio = msr & 0xFF;
- }
+ get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
+
+ bdx_highest_ratio = msr & 0xFF;
+ }

- get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
- turbo_is_enabled = (eax >> 1) & 1;
- has_hwp = (eax >> 7) & 1;
- has_epb = (ecx >> 3) & 1;
+ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
+ turbo_is_enabled = (eax >> 1) & 1;
+ has_hwp = (eax >> 7) & 1;
+ has_epb = (ecx >> 3) & 1;
+ } else if (authentic_amd) {
+ /* AMD Processors CPUID info */
+ amd_get_cpuid_or_exit(0x8, &eax, &ebx, &ecx, &edx);
+ turbo_is_enabled = (eax >> 1) & 1;
+ has_hwp = (ebx >> 27) & 1;
+ has_hwp_epp = (ebx >> 27) & 1;
+ }
}

/*
@@ -1444,6 +1556,8 @@ void parse_cpuid(void)

if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
+ else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+ authentic_amd = 1;

if (debug)
fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
@@ -1456,6 +1570,11 @@ void parse_cpuid(void)
if (family == 6 || family == 0xf)
model += ((fms >> 16) & 0xf) << 4;

+ if (authentic_amd) {
+ if (family == 0xf)
+ family += (fms >> 20) & 0xff;
+ }
+
if (debug) {
fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
max_level, family, model, stepping, family, model, stepping);
@@ -1473,14 +1592,18 @@ void parse_cpuid(void)
if (!(edx & (1 << 5)))
errx(1, "CPUID: no MSR");

-
- get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
- /* turbo_is_enabled already set */
- /* has_hwp already set */
- has_hwp_notify = eax & (1 << 8);
- has_hwp_activity_window = eax & (1 << 9);
- has_hwp_epp = eax & (1 << 10);
- has_hwp_request_pkg = eax & (1 << 11);
+ if (genuine_intel) {
+ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
+ /* turbo_is_enabled already set */
+ /* has_hwp already set */
+ has_hwp_notify = eax & (1 << 8);
+ has_hwp_activity_window = eax & (1 << 9);
+ has_hwp_epp = eax & (1 << 10);
+ has_hwp_request_pkg = eax & (1 << 11);
+ } else if (authentic_amd) {
+ amd_get_cpuid_or_exit(0x8, &eax, &ebx, &ecx, &edx);
+ has_hwp_epp = (ebx >> 27) & 1;
+ }

if (!has_hwp_request_pkg && update_hwp_use_pkg)
errx(1, "--hwp-use-pkg is not available on this hardware");
--
2.34.1



2024-01-30 20:04:59

by Mario Limonciello

Subject: Re: [PATCH Resend 1/8] tools/power x86_energy_perf_policy: add info show support for AMD Pstate EPP driver

On 1/29/2024 23:56, Perry Yuan wrote:
> From: Perry Yuan <[email protected]>
>
> With the amd_pstate EPP driver implemented, the x86_energy_perf_policy
> utility needs to be extended to display hardware energy and performance
> policy hint information on AMD processors.
>
> Signed-off-by: Perry Yuan <[email protected]>
> ---
> .../x86_energy_perf_policy.c | 211 ++++++++++++++----
> 1 file changed, 167 insertions(+), 44 deletions(-)
>
> diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
> index 5fd9e594079c..5daf1c2bb601 100644
> --- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
> +++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
> @@ -76,6 +76,8 @@ unsigned long long pkg_selected_set;
> cpu_set_t *cpu_present_set;
> cpu_set_t *cpu_selected_set;
> int genuine_intel;
> +unsigned int authentic_amd;
> +unsigned int max_level;
>
> size_t cpu_setsize;
>
> @@ -724,6 +726,53 @@ int put_msr(int cpu, int offset, unsigned long long new_msr)
> return 0;
> }
>
> +static int amd_put_msr(int cpu, off_t offset, unsigned long msr)
> +{
> + ssize_t retval;
> + int fd;
> + char pathname[32];
> +
> + sprintf(pathname, "/dev/cpu/%d/msr", cpu);
> + fd = open(pathname, O_RDWR);
> + if (fd < 0) {
> + err(-EACCES, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
> + goto out;
> + }
> + retval = pwrite(fd, &msr, sizeof(msr), offset);
> + if (retval != sizeof(msr))
> + err(-EFAULT, "cpu%d: msr offset 0x%lx write failed ret = %ld fd = %d", cpu, (unsigned long)offset, retval, fd);
> +
> + if (debug > 1)
> + fprintf(stderr, "amd_put_msr(cpu%d, 0x%lx, 0x%lX)\n", cpu, offset, msr);
> +
> + close(fd);
> +
> +out:
> + return (retval == sizeof(msr)) ? 0 : -1;
> +}
> +
> +
> +static int amd_get_msr(int cpu, off_t offset, unsigned long *msr)
> +{
> + ssize_t retval;
> + char pathname[32];
> + int fd;
> +
> + sprintf(pathname, "/dev/cpu/%d/msr", cpu);
> + fd = open(pathname, O_RDONLY);
> + if (fd < 0) {
> + err(-EACCES, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
> + goto out;
> + }
> + retval = pread(fd, msr, sizeof(*msr), offset);
> + if (retval != sizeof *msr)
> + err(-EFAULT, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
> +
> + close(fd);
> +out:
> + return (retval == sizeof *msr) ? 0 : -1;
> +}
> +

I don't see a reason that the existing put_msr()/get_msr() can't be reused
on the AMD side too. Did I miss something?
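
For example, something like the following (untested sketch, assuming
get_msr() keeps its current int get_msr(int cpu, int offset,
unsigned long long *msr) signature) would avoid both the duplicated
accessor and the cast through unsigned long:

void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
{
	unsigned long long msr;

	/*
	 * get_msr() already reads a full 64-bit value from /dev/cpu/N/msr,
	 * so only the field decode needs to be vendor specific.
	 */
	get_msr(cpu, msr_offset, &msr);

	if (genuine_intel) {
		cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
		cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
		cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
		cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
	} else if (authentic_amd) {
		cap->highest = msr_perf_2_ratio(AMD_CPPC_HIGHEST_PERF(msr));
		cap->lowest = msr_perf_2_ratio(AMD_CPPC_LOWEST_PERF(msr));
	}
}

The same pattern would apply to read_hwp_request() and
verify_hwp_is_enabled().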
