This is the 4th attempt to fix the reported task state issue in the
sched_switch tracepoint; you can check out the previous discussions here:
v1: https://lore.kernel.org/linux-trace-kernel/[email protected]
v2: https://lore.kernel.org/linux-trace-kernel/[email protected]
v3: https://lore.kernel.org/linux-trace-kernel/[email protected]
Compared with v3, the commits are reorganized so that they can be
cherry-picked individually if people have strong opinions about this
change. Note that PATCH 1-4 are normal fixes and cleanups, whereas
PATCH 5-7 introduce new changes.
--
FYI, this series is designed not to break anything and still keeps a 1-1
int-to-char mapping for each distinct task state we want to report, so it
will not lose any details intended for debugging purposes. Of course,
this might be compromised by bugs I have introduced, so your sage
comments are very important and appreciated!
--
Currently, we see three different outcomes for the reported sched-out
task state from perf script, perf sched timehist, and the TP_printk of
the sched_switch tracepoint. It is not hard to see that the former two
are built upon the third, and the reason for this inconsistency is that
the former two have not caught up with the internal changes to the
reported task state definitions as the kernel has evolved.
IMHO, exporting internal representations of the task state through the
sched_switch tracepoint is not good practice and should not be
encouraged, since it can easily break userspace tools that rely on it.
This is especially true now that tracepoints are heavily used by many
observability tools because of their stable nature, which means they are
no longer used for debugging purposes only, and we should be careful
about what ought to be reported to userspace and what ought not.
Therefore, to fix the issues mentioned above for good, I propose adding
a new field to sched_switch that reports the task state as a symbolic
char alongside the old raw value, which saves userspace tools from
further processing and spares them from knowing kernel implementation
details.
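To make the proposal concrete, here is a minimal sketch of what the
kernel-side change could look like. It is illustration only: the field
name prev_state_char comes from the patch titles, but the helper
sched_state_char() is invented for this sketch, and the real patches may
compute the char differently:

    /* include/trace/events/sched.h (sketch, new lines only) */
    TP_STRUCT__entry(
            ...
            __field(long, prev_state)       /* old raw value, kept as is */
            __field(char, prev_state_char)  /* new: 'R', 'S', 'D', ..., 'p' */
            ...
    ),

    TP_fast_assign(
            ...
            __entry->prev_state_char = sched_state_char(preempt, prev);
    ),

TP_printk() can then emit the new field with a plain %c, so consumers no
longer need to decode the raw state bits themselves.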
After this patch series, we report 'RSDTtXZPI', the same as in procfs,
plus a 'p' which denotes PREEMPT_ACTIVE (a preempted task) and is used
by the sched_switch tracepoint only.
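For reference, the standalone sketch below (not part of the patches)
spells out how those characters line up with the task state bits under
the usual ffs() convention; report_state_char() is a name made up for
this example, and the 0x0100 value used for the preempted case is just a
placeholder bit position chosen for illustration:

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    static char report_state_char(unsigned int state)
    {
            /* R running, S sleeping, D disk sleep, T stopped,
             * t tracing stop, X dead, Z zombie, P parked, I idle,
             * p preempted (new in this series)
             */
            static const char chars[] = "RSDTtXZPIp";
            unsigned bit = state ? ffs(state) : 0;

            return bit < sizeof(chars) - 1 ? chars[bit] : '?';
    }

    int main(void)
    {
            printf("%c %c %c %c\n",
                   report_state_char(0x0000),   /* TASK_RUNNING         -> R */
                   report_state_char(0x0001),   /* TASK_INTERRUPTIBLE   -> S */
                   report_state_char(0x0002),   /* TASK_UNINTERRUPTIBLE -> D */
                   report_state_char(0x0100));  /* preempted            -> p */
            return 0;
    }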
Reviews welcome!
Regards,
Ze
--
Ze Gao (5):
perf sched: sync state char array with the kernel
perf sched: reorganize sched-out task state report code
sched, tracing: reorganize fields of switch event struct
sched, tracing: add to report task state in symbolic chars
perf sched: prefer to use prev_state_char introduced in sched_switch
include/trace/events/sched.h | 68 +++++++++++++++++-------------
tools/perf/builtin-sched.c | 82 ++++++++++++++++--------------------
2 files changed, 76 insertions(+), 74 deletions(-)
Ze Gao (2):
libtraceevent: sync state char array with the kernel
libtraceevent: prefer to use prev_state_char introduced in
sched_switch
plugins/plugin_sched_switch.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
--
2.41.0
This mainly does housekeeping work and does not introduce any
functional change.
Signed-off-by: Ze Gao <[email protected]>
---
tools/perf/builtin-sched.c | 57 ++++++++++++++++----------------------
1 file changed, 24 insertions(+), 33 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 8dc8f071721c..eb310d1a7625 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -94,11 +94,6 @@ struct sched_atom {
#define TASK_STATE_TO_CHAR_STR "RSDTtXZPI"
-/* task state bitmask, copied from include/linux/sched.h */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-
enum thread_state {
THREAD_SLEEPING = 0,
THREAD_WAIT_CPU,
@@ -255,7 +250,7 @@ struct thread_runtime {
u64 total_preempt_time;
u64 total_delay_time;
- int last_state;
+ char last_state;
char shortname[3];
bool comm_changed;
@@ -425,7 +420,7 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t
}
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
- u64 timestamp, u64 task_state __maybe_unused)
+ u64 timestamp, char task_state __maybe_unused)
{
struct sched_atom *event = get_new_event(task, timestamp);
@@ -840,6 +835,20 @@ replay_wakeup_event(struct perf_sched *sched,
return 0;
}
+static inline char task_state_char(int state)
+{
+ static const char state_to_char[] = "RSDTtXZPI";
+ unsigned bit = state ? ffs(state) : 0;
+ return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
+static inline char get_task_prev_state(struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ const int prev_state = evsel__intval(evsel, sample, "prev_state");
+ return task_state_char(prev_state);
+}
+
static int replay_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
@@ -849,7 +858,7 @@ static int replay_switch_event(struct perf_sched *sched,
*next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+ const char prev_state = get_task_prev_state(evsel, sample);
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
@@ -1039,12 +1048,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
return 0;
}
-static char sched_out_state(u64 prev_state)
-{
- const char *str = TASK_STATE_TO_CHAR_STR;
-
- return str[prev_state];
-}
static int
add_sched_out_event(struct work_atoms *atoms,
@@ -1121,7 +1124,7 @@ static int latency_switch_event(struct perf_sched *sched,
{
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+ const char prev_state = get_task_prev_state(evsel, sample);
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
@@ -1157,7 +1160,7 @@ static int latency_switch_event(struct perf_sched *sched,
goto out_put;
}
}
- if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+ if (add_sched_out_event(out_events, prev_state, timestamp))
return -1;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
@@ -2022,24 +2025,12 @@ static void timehist_header(struct perf_sched *sched)
printf("\n");
}
-static char task_state_char(struct thread *thread, int state)
-{
- static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
- unsigned bit = state ? ffs(state) : 0;
-
- /* 'I' for idle */
- if (thread__tid(thread) == 0)
- return 'I';
-
- return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
static void timehist_print_sample(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct thread *thread,
- u64 t, int state)
+ u64 t, char state)
{
struct thread_runtime *tr = thread__priv(thread);
const char *next_comm = evsel__strval(evsel, sample, "next_comm");
@@ -2080,7 +2071,7 @@ static void timehist_print_sample(struct perf_sched *sched,
print_sched_time(tr->dt_run, 6);
if (sched->show_state)
- printf(" %5c ", task_state_char(thread, state));
+		printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
if (sched->show_next) {
snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
@@ -2152,9 +2143,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
else if (r->last_time) {
u64 dt_wait = tprev - r->last_time;
- if (r->last_state == TASK_RUNNING)
+ if (r->last_state == 'R')
r->dt_preempt = dt_wait;
- else if (r->last_state == TASK_UNINTERRUPTIBLE)
+ else if (r->last_state == 'D')
r->dt_iowait = dt_wait;
else
r->dt_sleep = dt_wait;
@@ -2579,7 +2570,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
- int state = evsel__intval(evsel, sample, "prev_state");
+ const char state = get_task_prev_state(evsel, sample);
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
--
2.41.0
Since the sched_switch tracepoint now provides a new field that reports
the sched-out task state as a symbolic char, prefer to use it so that we
are spared from knowing kernel implementation details. Also, keep the
old parsing logic intact as a fallback, but sync the state char array
with the latest kernel.
Signed-off-by: Ze Gao <[email protected]>
---
tools/perf/builtin-sched.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index eb310d1a7625..7f76ba51e36d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -837,7 +837,7 @@ replay_wakeup_event(struct perf_sched *sched,
static inline char task_state_char(int state)
{
- static const char state_to_char[] = "RSDTtXZPI";
+ static const char state_to_char[] = "RSDTtXZPIp";
unsigned bit = state ? ffs(state) : 0;
return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
@@ -845,8 +845,20 @@ static inline char task_state_char(int state)
static inline char get_task_prev_state(struct evsel *evsel,
struct perf_sample *sample)
{
- const int prev_state = evsel__intval(evsel, sample, "prev_state");
- return task_state_char(prev_state);
+ char prev_state_char;
+ int prev_state;
+
+	/* Prefer the new prev_state_char field when it is present */
+	if (evsel__field(evsel, "prev_state_char")) {
+		prev_state_char = (char) evsel__intval(evsel,
+						sample, "prev_state_char");
+	} else {
+ prev_state = (int) evsel__intval(evsel,
+ sample, "prev_state");
+ prev_state_char = task_state_char(prev_state);
+ }
+
+ return prev_state_char;
}
static int replay_switch_event(struct perf_sched *sched,
@@ -2143,7 +2155,7 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
else if (r->last_time) {
u64 dt_wait = tprev - r->last_time;
- if (r->last_state == 'R')
+ if (r->last_state == 'R' || r->last_state == 'p')
r->dt_preempt = dt_wait;
else if (r->last_state == 'D')
r->dt_iowait = dt_wait;
--
2.41.0