Dan Williams suggested changing the struct 'node_hmem_attrs' to
'access_coordinates' [1]. The struct is a container of r/w-latency and
r/w-bandwidth numbers. Moving forward, this container will also be used by
CXL to store the performance characteristics of each link hop in
the PCIe/CXL topology. So, where node_hmem_attrs is just the access
parameters of a memory-node, access_coordinates applies more broadly
to hardware topology characteristics.
[1]: http://lore.kernel.org/r/[email protected]/
Suggested-by: Dan Williams <[email protected]>
Reviewed-by: Dan Williams <[email protected]>
Signed-off-by: Dave Jiang <[email protected]>
---
drivers/acpi/numa/hmat.c | 20 ++++++++++----------
drivers/base/node.c | 12 ++++++------
include/linux/node.h | 8 ++++----
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index bba268ecd802..f9ff992038fa 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -62,7 +62,7 @@ struct memory_target {
unsigned int memory_pxm;
unsigned int processor_pxm;
struct resource memregions;
- struct node_hmem_attrs hmem_attrs[2];
+ struct access_coordinate coord[2];
struct list_head caches;
struct node_cache_attrs cache_attrs;
bool registered;
@@ -227,24 +227,24 @@ static void hmat_update_target_access(struct memory_target *target,
{
switch (type) {
case ACPI_HMAT_ACCESS_LATENCY:
- target->hmem_attrs[access].read_latency = value;
- target->hmem_attrs[access].write_latency = value;
+ target->coord[access].read_latency = value;
+ target->coord[access].write_latency = value;
break;
case ACPI_HMAT_READ_LATENCY:
- target->hmem_attrs[access].read_latency = value;
+ target->coord[access].read_latency = value;
break;
case ACPI_HMAT_WRITE_LATENCY:
- target->hmem_attrs[access].write_latency = value;
+ target->coord[access].write_latency = value;
break;
case ACPI_HMAT_ACCESS_BANDWIDTH:
- target->hmem_attrs[access].read_bandwidth = value;
- target->hmem_attrs[access].write_bandwidth = value;
+ target->coord[access].read_bandwidth = value;
+ target->coord[access].write_bandwidth = value;
break;
case ACPI_HMAT_READ_BANDWIDTH:
- target->hmem_attrs[access].read_bandwidth = value;
+ target->coord[access].read_bandwidth = value;
break;
case ACPI_HMAT_WRITE_BANDWIDTH:
- target->hmem_attrs[access].write_bandwidth = value;
+ target->coord[access].write_bandwidth = value;
break;
default:
break;
@@ -701,7 +701,7 @@ static void hmat_register_target_cache(struct memory_target *target)
static void hmat_register_target_perf(struct memory_target *target, int access)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
- node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
+ node_set_perf_attrs(mem_nid, &target->coord[access], access);
}
static void hmat_register_target_devices(struct memory_target *target)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 2cada01c70da..fc0444b617d0 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -75,14 +75,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
* @dev: Device for this memory access class
* @list_node: List element in the node's access list
* @access: The access class rank
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord: Heterogeneous memory performance coordinates
*/
struct node_access_nodes {
struct device dev;
struct list_head list_node;
unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
- struct node_hmem_attrs hmem_attrs;
+ struct access_coordinate coord;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
@@ -168,7 +168,7 @@ static ssize_t property##_show(struct device *dev, \
char *buf) \
{ \
return sysfs_emit(buf, "%u\n", \
- to_access_nodes(dev)->hmem_attrs.property); \
+ to_access_nodes(dev)->coord.property); \
} \
static DEVICE_ATTR_RO(property)
@@ -188,10 +188,10 @@ static struct attribute *access_attrs[] = {
/**
* node_set_perf_attrs - Set the performance values for given access class
* @nid: Node identifier to be set
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord: Heterogeneous memory performance coordinates
* @access: The access class the for the given attributes
*/
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned int access)
{
struct node_access_nodes *c;
@@ -206,7 +206,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
if (!c)
return;
- c->hmem_attrs = *hmem_attrs;
+ c->coord = *coord;
for (i = 0; access_attrs[i] != NULL; i++) {
if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
"initiators")) {
diff --git a/include/linux/node.h b/include/linux/node.h
index 427a5975cf40..25b66d705ee2 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -20,14 +20,14 @@
#include <linux/list.h>
/**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
-struct node_hmem_attrs {
+struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
@@ -65,7 +65,7 @@ struct node_cache_attrs {
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned access);
#else
static inline void node_add_cache(unsigned int nid,
@@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
- struct node_hmem_attrs *hmem_attrs,
+ struct access_coordinate *coord,
unsigned access)
{
}
On Fri, 05 May 2023 14:34:46 -0700
Dave Jiang <[email protected]> wrote:
> Dan Williams suggested changing the struct 'node_hmem_attrs' to
> 'access_coordinates' [1]. The struct is a container of r/w-latency and
> r/w-bandwidth numbers. Moving forward, this container will also be used by
> CXL to store the performance characteristics of each link hop in
> the PCIE/CXL topology. So, where node_hmem_attrs is just the access
> parameters of a memory-node, access_coordinates applies more broadly
> to hardware topology characteristics.
Not that it hugely matters, but why the term "coordinates"?
Looks like Dan used that term, but I've not come across it being applied
in these circumstances and it isn't a case of being immediately obvious
to me what it means.
If it is just another vague entry in kernel word soup then I don't really
mind the term, but nice to give some reasoning in patch description.
Patch otherwise looks fine to me.
Jonathan
>
> [1]: http://lore.kernel.org/r/[email protected]/
>
> Suggested-by: Dan Williams <[email protected]>
> Reviewed-by: Dan Williams <[email protected]>
> Signed-off-by: Dave Jiang <[email protected]>
> ---
> drivers/acpi/numa/hmat.c | 20 ++++++++++----------
> drivers/base/node.c | 12 ++++++------
> include/linux/node.h | 8 ++++----
> 3 files changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
> index bba268ecd802..f9ff992038fa 100644
> --- a/drivers/acpi/numa/hmat.c
> +++ b/drivers/acpi/numa/hmat.c
> @@ -62,7 +62,7 @@ struct memory_target {
> unsigned int memory_pxm;
> unsigned int processor_pxm;
> struct resource memregions;
> - struct node_hmem_attrs hmem_attrs[2];
> + struct access_coordinate coord[2];
> struct list_head caches;
> struct node_cache_attrs cache_attrs;
> bool registered;
> @@ -227,24 +227,24 @@ static void hmat_update_target_access(struct memory_target *target,
> {
> switch (type) {
> case ACPI_HMAT_ACCESS_LATENCY:
> - target->hmem_attrs[access].read_latency = value;
> - target->hmem_attrs[access].write_latency = value;
> + target->coord[access].read_latency = value;
> + target->coord[access].write_latency = value;
> break;
> case ACPI_HMAT_READ_LATENCY:
> - target->hmem_attrs[access].read_latency = value;
> + target->coord[access].read_latency = value;
> break;
> case ACPI_HMAT_WRITE_LATENCY:
> - target->hmem_attrs[access].write_latency = value;
> + target->coord[access].write_latency = value;
> break;
> case ACPI_HMAT_ACCESS_BANDWIDTH:
> - target->hmem_attrs[access].read_bandwidth = value;
> - target->hmem_attrs[access].write_bandwidth = value;
> + target->coord[access].read_bandwidth = value;
> + target->coord[access].write_bandwidth = value;
> break;
> case ACPI_HMAT_READ_BANDWIDTH:
> - target->hmem_attrs[access].read_bandwidth = value;
> + target->coord[access].read_bandwidth = value;
> break;
> case ACPI_HMAT_WRITE_BANDWIDTH:
> - target->hmem_attrs[access].write_bandwidth = value;
> + target->coord[access].write_bandwidth = value;
> break;
> default:
> break;
> @@ -701,7 +701,7 @@ static void hmat_register_target_cache(struct memory_target *target)
> static void hmat_register_target_perf(struct memory_target *target, int access)
> {
> unsigned mem_nid = pxm_to_node(target->memory_pxm);
> - node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
> + node_set_perf_attrs(mem_nid, &target->coord[access], access);
> }
>
> static void hmat_register_target_devices(struct memory_target *target)
> diff --git a/drivers/base/node.c b/drivers/base/node.c
> index 2cada01c70da..fc0444b617d0 100644
> --- a/drivers/base/node.c
> +++ b/drivers/base/node.c
> @@ -75,14 +75,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
> * @dev: Device for this memory access class
> * @list_node: List element in the node's access list
> * @access: The access class rank
> - * @hmem_attrs: Heterogeneous memory performance attributes
> + * @coord: Heterogeneous memory performance coordinates
> */
> struct node_access_nodes {
> struct device dev;
> struct list_head list_node;
> unsigned int access;
> #ifdef CONFIG_HMEM_REPORTING
> - struct node_hmem_attrs hmem_attrs;
> + struct access_coordinate coord;
> #endif
> };
> #define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
> @@ -168,7 +168,7 @@ static ssize_t property##_show(struct device *dev, \
> char *buf) \
> { \
> return sysfs_emit(buf, "%u\n", \
> - to_access_nodes(dev)->hmem_attrs.property); \
> + to_access_nodes(dev)->coord.property); \
> } \
> static DEVICE_ATTR_RO(property)
>
> @@ -188,10 +188,10 @@ static struct attribute *access_attrs[] = {
> /**
> * node_set_perf_attrs - Set the performance values for given access class
> * @nid: Node identifier to be set
> - * @hmem_attrs: Heterogeneous memory performance attributes
> + * @coord: Heterogeneous memory performance coordinates
> * @access: The access class the for the given attributes
> */
> -void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
> +void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
> unsigned int access)
> {
> struct node_access_nodes *c;
> @@ -206,7 +206,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
> if (!c)
> return;
>
> - c->hmem_attrs = *hmem_attrs;
> + c->coord = *coord;
> for (i = 0; access_attrs[i] != NULL; i++) {
> if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
> "initiators")) {
> diff --git a/include/linux/node.h b/include/linux/node.h
> index 427a5975cf40..25b66d705ee2 100644
> --- a/include/linux/node.h
> +++ b/include/linux/node.h
> @@ -20,14 +20,14 @@
> #include <linux/list.h>
>
> /**
> - * struct node_hmem_attrs - heterogeneous memory performance attributes
> + * struct access_coordinate - generic performance coordinates container
> *
> * @read_bandwidth: Read bandwidth in MB/s
> * @write_bandwidth: Write bandwidth in MB/s
> * @read_latency: Read latency in nanoseconds
> * @write_latency: Write latency in nanoseconds
> */
> -struct node_hmem_attrs {
> +struct access_coordinate {
> unsigned int read_bandwidth;
> unsigned int write_bandwidth;
> unsigned int read_latency;
> @@ -65,7 +65,7 @@ struct node_cache_attrs {
>
> #ifdef CONFIG_HMEM_REPORTING
> void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
> -void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
> +void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
> unsigned access);
> #else
> static inline void node_add_cache(unsigned int nid,
> @@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid,
> }
>
> static inline void node_set_perf_attrs(unsigned int nid,
> - struct node_hmem_attrs *hmem_attrs,
> + struct access_coordinate *coord,
> unsigned access)
> {
> }
>
>
Jonathan Cameron wrote:
> On Fri, 05 May 2023 14:34:46 -0700
> Dave Jiang <[email protected]> wrote:
>
> > Dan Williams suggested changing the struct 'node_hmem_attrs' to
> > 'access_coordinates' [1]. The struct is a container of r/w-latency and
> > r/w-bandwidth numbers. Moving forward, this container will also be used by
> > CXL to store the performance characteristics of each link hop in
> > the PCIE/CXL topology. So, where node_hmem_attrs is just the access
> > parameters of a memory-node, access_coordinates applies more broadly
> > to hardware topology characteristics.
>
> Not that it hugely matters, but why the term "coordinates"?
> Looks like Dan used that term, but I've not come across it being applied
> in this circumstances and it isn't a case of being immediately obvious
> to me what it means.
>
> If it is just another vague entry in kernel word soup then I don't really
> mind the term, but nice to give some reasoning in patch description.
The inspiration here was past discussions that have been had about
potential API changes for userspace contending with multiple memory
types. The observation was that it seemed like an exercise in having the
application identify "where" it falls on a spectrum of bandwidth and
latency needs.
So it's a tuple of read/write-latency and read/write-bandwidth.
"Coordinates" is not a perfect fit. Sometimes it is just conveying
values in isolation not a "location" relative to other performance
points, but in the end this data is used to identify the performance
operation point of a given memory-node.
On Fri, 12 May 2023 08:58:14 -0700
Dan Williams <[email protected]> wrote:
> Jonathan Cameron wrote:
> > On Fri, 05 May 2023 14:34:46 -0700
> > Dave Jiang <[email protected]> wrote:
> >
> > > Dan Williams suggested changing the struct 'node_hmem_attrs' to
> > > 'access_coordinates' [1]. The struct is a container of r/w-latency and
> > > r/w-bandwidth numbers. Moving forward, this container will also be used by
> > > CXL to store the performance characteristics of each link hop in
> > > the PCIE/CXL topology. So, where node_hmem_attrs is just the access
> > > parameters of a memory-node, access_coordinates applies more broadly
> > > to hardware topology characteristics.
> >
> > Not that it hugely matters, but why the term "coordinates"?
> > Looks like Dan used that term, but I've not come across it being applied
> > in this circumstances and it isn't a case of being immediately obvious
> > to me what it means.
> >
> > If it is just another vague entry in kernel word soup then I don't really
> > mind the term, but nice to give some reasoning in patch description.
>
> The inspiration here was past discussions that have been had about
> potential API changes for userspace contending with multiple memory
> types. The observation was that seemed like an exercise in having the
> application identify "where" it falls on a spectrum of bandwidth and
> latency needs.
>
> So it's a tuple of read/write-latency and read/write-bandwidth.
> "Coordinates" is not a perfect fit. Sometimes it is just conveying
> values in isolation not a "location" relative to other performance
> points, but in the end this data is used to identify the performance
> operation point of a given memory-node.
Works for me. Can we add that to the patch description for the historians?
Having read a load more of the code using it, it now feels natural to me.
Thanks,
Jonathan