From: Zhao Liu
Date: 2024-02-03 11:17:26

Subject: [RFC 02/26] thermal: intel: hfi: Add helpers to build HFI/ITD structures

From: Zhuocheng Ding <[email protected]>

Virtual machines need to compose their own HFI tables. Add helpers that
build a virtual hfi_features structure and fill entries of a virtual HFI
table with HFI data collected from the host.

Tested-by: Yanting Jiang <[email protected]>
Signed-off-by: Zhuocheng Ding <[email protected]>
Co-developed-by: Zhao Liu <[email protected]>
Signed-off-by: Zhao Liu <[email protected]>
---
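A rough sketch of how a hypervisor-side caller might use the new helpers
(the caller below is hypothetical and not part of this patch; nr_vcpus,
pcpu, the notification step and the chosen table layout are illustrative
assumptions; error unwinding is omitted for brevity):

        struct hfi_features feat;
        struct hfi_table table;
        void *buf;
        int ret;

        if (!intel_hfi_enabled())
                return -ENODEV;

        /* nr_classes == 1: plain HFI; 4 classes would also expose ITD. */
        ret = intel_hfi_build_virt_features(&feat, 1, nr_vcpus);
        if (ret)
                return ret;

        /* Back the virtual table with zeroed pages, sized by the helper. */
        buf = alloc_pages_exact(feat.nr_table_pages << PAGE_SHIFT,
                                GFP_KERNEL | __GFP_ZERO);
        if (!buf)
                return -ENOMEM;

        /* Assumed layout: per-class header at the base, CPU rows after it. */
        table.hdr = buf;
        table.data = buf + feat.hdr_size;

        /* Fill row 0 from the host HFI data of the pCPU backing vCPU 0. */
        ret = intel_hfi_build_virt_table(&table, &feat, 1, 0, pcpu);
        if (ret < 0)
                return ret;
        if (ret == 1) {
                /* The row changed: notify the guest of the update here. */
        }
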
arch/x86/include/asm/hfi.h | 20 ++++
drivers/thermal/intel/intel_hfi.c | 150 ++++++++++++++++++++++++++++++
2 files changed, 170 insertions(+)

diff --git a/arch/x86/include/asm/hfi.h b/arch/x86/include/asm/hfi.h
index b7fda3e0e8c8..e0fe5b30fb53 100644
--- a/arch/x86/include/asm/hfi.h
+++ b/arch/x86/include/asm/hfi.h
@@ -82,4 +82,24 @@ struct hfi_features {
unsigned int hdr_size;
};

+#if defined(CONFIG_INTEL_HFI_THERMAL)
+int intel_hfi_max_instances(void);
+int intel_hfi_build_virt_features(struct hfi_features *features, unsigned int nr_classes,
+ unsigned int nr_entries);
+int intel_hfi_build_virt_table(struct hfi_table *table, struct hfi_features *features,
+ unsigned int nr_classes, unsigned int hfi_index,
+ unsigned int cpu);
+static inline bool intel_hfi_enabled(void) { return intel_hfi_max_instances() > 0; }
+#else
+static inline int intel_hfi_max_instances(void) { return 0; }
+static inline int intel_hfi_build_virt_features(struct hfi_features *features,
+ unsigned int nr_classes,
+ unsigned int nr_entries) { return 0; }
+static inline int intel_hfi_build_virt_table(struct hfi_table *table,
+ struct hfi_features *features,
+ unsigned int nr_classes, unsigned int hfi_index,
+ unsigned int cpu) { return 0; }
+static inline bool intel_hfi_enabled(void) { return false; }
+#endif
+
#endif /* _ASM_X86_HFI_H */
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index b69fa234b317..139ce2d4b26b 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math.h>
+#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/printk.h>
@@ -642,3 +643,152 @@ void __init intel_hfi_init(void)
kfree(hfi_instances);
hfi_instances = NULL;
}
+
+/**
+ * intel_hfi_max_instances() - Get the maximum number of HFI instances.
+ *
+ * Return: the maximum number of HFI instances.
+ */
+int intel_hfi_max_instances(void)
+{
+ return max_hfi_instances;
+}
+EXPORT_SYMBOL_GPL(intel_hfi_max_instances);
+
+/**
+ * intel_hfi_build_virt_features() - Build a virtual hfi_features structure.
+ *
+ * @features: Feature structure to be filled
+ * @nr_classes: Maximum number of classes supported. 1 class means that
+ * only the HFI feature is configured; 4 classes means that
+ * both HFI and ITD are configured.
+ * @nr_entries: Number of entries in the HFI table.
+ *
+ * Fill a virtual hfi_features structure for use in HFI/ITD virtualization.
+ * HFI and ITD expose different feature information, so the virtual feature
+ * structure is built according to the number of classes configured in the
+ * guest CPUID.
+ *
+ * Return: -EINVAL if the parameters are invalid, 0 otherwise.
+ */
+int intel_hfi_build_virt_features(struct hfi_features *features,
+ unsigned int nr_classes,
+ unsigned int nr_entries)
+{
+ unsigned int data_size;
+
+ if (!features || !nr_classes || !nr_entries)
+ return -EINVAL;
+
+ /*
+ * The virtual features must be based on the host's features; when the
+ * host enables both HFI and ITD, the guest is allowed to expose only
+ * HFI, which uses fewer classes than ITD.
+ */
+ if (nr_classes > hfi_features.nr_classes)
+ return -EINVAL;
+
+ features->nr_classes = nr_classes;
+ features->class_stride = hfi_features.class_stride;
+ /*
+ * For the meaning of these two calculations, please refer to the comments
+ * in hfi_parse_features().
+ */
+ features->hdr_size = DIV_ROUND_UP(features->class_stride *
+ features->nr_classes, 8) * 8;
+ features->cpu_stride = DIV_ROUND_UP(features->class_stride *
+ features->nr_classes, 8) * 8;
+
+ data_size = features->hdr_size + nr_entries * features->cpu_stride;
+ features->nr_table_pages = PAGE_ALIGN(data_size) >> PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_hfi_build_virt_features);
+
+/**
+ * intel_hfi_build_virt_table() - Fill the data of @hfi_index in a virtual HFI table.
+ *
+ * @table: HFI table to be filled
+ * @features: Feature information with which the HFI table was configured
+ * @nr_classes: Number of classes to be updated in @table. This is based
+ * on the currently enabled features and may be smaller than
+ * the number of classes configured in @features.
+ * @hfi_index: Index of the row in the HFI table to be filled
+ * @cpu: CPU whose host HFI data is used to fill the row at @hfi_index
+ *
+ * Fill the row at @hfi_index in a virtual HFI table used for HFI/ITD
+ * virtualization. The size of the virtual HFI table is determined by the
+ * feature information configured in @features, while the range of HFI data
+ * that gets filled is determined by @nr_classes.
+ *
+ * A virtual machine may disable ITD at runtime through
+ * MSR_IA32_HW_FEEDBACK_CONFIG; in that case, only the data of class 0 can
+ * be dynamically updated in the virtual HFI table.
+ *
+ * Return: 1 if @table has changed, 0 if it has not, and -EINVAL/-ENOMEM on
+ * invalid parameters.
+ */
+int intel_hfi_build_virt_table(struct hfi_table *table,
+ struct hfi_features *features,
+ unsigned int nr_classes,
+ unsigned int hfi_index,
+ unsigned int cpu)
+{
+ struct hfi_instance *hfi_instance;
+ struct hfi_hdr *hfi_hdr;
+ s16 host_hfi_index;
+ void *src_ptr, *dst_ptr;
+ int table_changed = 0;
+
+ if (!table || !features || !nr_classes)
+ return -EINVAL;
+
+ if (nr_classes > features->nr_classes ||
+ nr_classes > hfi_features.nr_classes)
+ return -EINVAL;
+
+ /*
+ * Make sure that the row to be filled does not overflow the table.
+ * features->nr_classes, from which cpu_stride is derived, indicates
+ * the maximum number of possible classes.
+ */
+ if (features->hdr_size + (hfi_index + 1) * features->cpu_stride >
+ features->nr_table_pages << PAGE_SHIFT)
+ return -ENOMEM;
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (features->class_stride != hfi_features.class_stride)
+ return -EINVAL;
+
+ hfi_hdr = table->hdr;
+ hfi_instance = per_cpu(hfi_cpu_info, cpu).hfi_instance;
+ host_hfi_index = per_cpu(hfi_cpu_info, cpu).index;
+
+ src_ptr = hfi_instance->local_table.data +
+ host_hfi_index * hfi_features.cpu_stride;
+ dst_ptr = table->data + hfi_index * features->cpu_stride;
+
+ raw_spin_lock_irq(&hfi_instance->table_lock);
+ for (int i = 0; i < nr_classes; i++) {
+ struct hfi_cpu_data *src = src_ptr + i * hfi_features.class_stride;
+ struct hfi_cpu_data *dst = dst_ptr + i * features->class_stride;
+
+ if (dst->perf_cap != src->perf_cap) {
+ dst->perf_cap = src->perf_cap;
+ hfi_hdr->perf_updated = 1;
+ }
+ if (dst->ee_cap != src->ee_cap) {
+ dst->ee_cap = src->ee_cap;
+ hfi_hdr->ee_updated = 1;
+ }
+ if (hfi_hdr->perf_updated || hfi_hdr->ee_updated)
+ table_changed = 1;
+ hfi_hdr++;
+ }
+ raw_spin_unlock_irq(&hfi_instance->table_lock);
+
+ return table_changed;
+}
+EXPORT_SYMBOL_GPL(intel_hfi_build_virt_table);
--
2.34.1