Date: 2024-06-04 14:36:00
From: James Clark
Subject: [PATCH v2 12/16] coresight: Make CPU id map a property of a trace ID map

The global CPU ID mapping won't work for per-sink ID maps, so move it
into the ID map struct. coresight_trace_id_release_all_pending() is
hard-coded to operate on the default map, but the pending release
mechanism will be deleted once Perf sessions use their own maps, so it
doesn't need to be extended to take a trace ID map argument at this
point.

Signed-off-by: James Clark <[email protected]>
---
drivers/hwtracing/coresight/coresight-trace-id.c | 16 +++++++++-------
include/linux/coresight.h                        |  1 +
2 files changed, 10 insertions(+), 7 deletions(-)
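
Note (not part of the patch): a minimal sketch of how a per-sink map
could allocate its own cpu_map with alloc_percpu() once the field lives
in struct coresight_trace_id_map. The helper names and error handling
below are illustrative only, assuming a later patch in the series adds
something along these lines:

#include <linux/coresight.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* Hypothetical allocation helper, for illustration only */
static struct coresight_trace_id_map *example_alloc_trace_id_map(void)
{
	struct coresight_trace_id_map *id_map;

	id_map = kzalloc(sizeof(*id_map), GFP_KERNEL);
	if (!id_map)
		return NULL;

	/* each map now carries its own per-CPU trace ID records */
	id_map->cpu_map = alloc_percpu(atomic_t);
	if (!id_map->cpu_map) {
		kfree(id_map);
		return NULL;
	}
	return id_map;
}

/* Matching teardown would release the per-CPU storage */
static void example_free_trace_id_map(struct coresight_trace_id_map *id_map)
{
	free_percpu(id_map->cpu_map);
	kfree(id_map);
}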

diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index 5561989a03fa..8a777c0af6ea 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -13,10 +13,12 @@
 #include "coresight-trace-id.h"
 
 /* Default trace ID map. Used in sysfs mode and for system sources */
-static struct coresight_trace_id_map id_map_default;
+static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
+static struct coresight_trace_id_map id_map_default = {
+	.cpu_map = &id_map_default_cpu_ids
+};
 
-/* maintain a record of the mapping of IDs and pending releases per cpu */
-static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
+/* maintain a record of the pending releases per cpu */
 static cpumask_t cpu_id_release_pending;
 
 /* perf session active counter */
@@ -49,7 +51,7 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
 /* unlocked read of current trace ID value for given CPU */
 static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 {
-	return atomic_read(&per_cpu(cpu_id, cpu));
+	return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
 }
 
 /* look for next available odd ID, return 0 if none found */
@@ -145,7 +147,7 @@ static void coresight_trace_id_release_all_pending(void)
 		clear_bit(bit, id_map->pend_rel_ids);
 	}
 	for_each_cpu(cpu, &cpu_id_release_pending) {
-		atomic_set(&per_cpu(cpu_id, cpu), 0);
+		atomic_set(per_cpu_ptr(id_map_default.cpu_map, cpu), 0);
 		cpumask_clear_cpu(cpu, &cpu_id_release_pending);
 	}
 	spin_unlock_irqrestore(&id_map_lock, flags);
@@ -181,7 +183,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
 		goto get_cpu_id_out_unlock;
 
 	/* allocate the new id to the cpu */
-	atomic_set(&per_cpu(cpu_id, cpu), id);
+	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
 
 get_cpu_id_clr_pend:
 	/* we are (re)using this ID - so ensure it is not marked for release */
@@ -215,7 +217,7 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
 	} else {
 		/* otherwise clear id */
 		coresight_trace_id_free(id, id_map);
-		atomic_set(&per_cpu(cpu_id, cpu), 0);
+		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
 	}
 
 	spin_unlock_irqrestore(&id_map_lock, flags);
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index c16c61a8411d..7d62b88bfb5c 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -234,6 +234,7 @@ struct coresight_sysfs_link {
 struct coresight_trace_id_map {
 	DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
 	DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
+	atomic_t __percpu *cpu_map;
 };
 
 /**
--
2.34.1