In this patchset, I introduce some new macros TRACE_EVENT_NOP,
DEFINE_EVENT_NOP and DECLARE_EVENT_CLASS_NOP, which will
define a tracepoint as a do-nothing inline function.
#define DECLARE_EVENT_NOP(name, proto, args) \
static inline void trace_##name(proto) \
{ } \
static inline bool trace_##name##_enabled(void) \
{ \
return false; \
}
Let's take some examples for why these macros are needed.
- sched
The tracepoints trace_sched_stat_{iowait, blocked, wait, sleep} should
not be exposed to users if CONFIG_SCHEDSTATS is not set.
- rcu
When CONFIG_RCU_TRACE is not set, some rcu tracepoints are defined as
do-nothing macros that do not validate their arguments, which is not
proper. We should validate the arguments.
Yafang Shao (3):
tracing: introduce TRACE_EVENT_NOP()
sched/fair: do not expose some tracepoints to user if
CONFIG_SCHEDSTATS is not set
rcu: validate arguments for rcu tracepoints
include/linux/tracepoint.h | 15 ++++++++
include/trace/define_trace.h | 8 +++++
include/trace/events/rcu.h | 81 ++++++++++++++------------------------------
include/trace/events/sched.h | 21 ++++++++----
kernel/rcu/rcu.h | 9 ++---
kernel/rcu/tree.c | 8 ++---
6 files changed, 68 insertions(+), 74 deletions(-)
--
1.8.3.1
Sometimes we want to define a tracepoint as a do-nothing function.
So I introduce TRACE_EVENT_NOP, DECLARE_EVENT_CLASS_NOP and
DEFINE_EVENT_NOP for this kind of usage.
Signed-off-by: Yafang Shao <[email protected]>
---
include/linux/tracepoint.h | 15 +++++++++++++++
include/trace/define_trace.h | 8 ++++++++
2 files changed, 23 insertions(+)
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 9c31865..86b019a 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -548,4 +548,19 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define TRACE_EVENT_PERF_PERM(event, expr...)
+#define DECLARE_EVENT_NOP(name, proto, args) \
+ static inline void trace_##name(proto) \
+ { } \
+ static inline bool trace_##name##_enabled(void) \
+ { \
+ return false; \
+ }
+
+#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) \
+ DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
+
+#define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print)
+#define DEFINE_EVENT_NOP(template, name, proto, args) \
+ DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
+
#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index cb30c55..bd75f97 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -46,6 +46,12 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
+#undef TRACE_EVENT_NOP
+#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
+
+#undef DEFINE_EVENT_NOP
+#define DEFINE_EVENT_NOP(template, name, proto, args)
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
@@ -102,6 +108,8 @@
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_FN_COND
#undef TRACE_EVENT_CONDITION
+#undef TRACE_EVENT_NOP
+#undef DEFINE_EVENT_NOP
#undef DECLARE_EVENT_CLASS
#undef DEFINE_EVENT
#undef DEFINE_EVENT_FN
--
1.8.3.1
The tracepoints trace_sched_stat_{iowait, blocked, wait, sleep} should
not be exposed to users if CONFIG_SCHEDSTATS is not set.
Signed-off-by: Yafang Shao <[email protected]>
---
include/trace/events/sched.h | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9a4bdfa..c8c7c7e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -241,7 +241,6 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
-
/*
* Tracepoint for a task exiting:
@@ -336,11 +335,20 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
__entry->pid, __entry->old_pid)
);
+
+#ifdef CONFIG_SCHEDSTATS
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
+#else
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
+#endif
+
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
-DECLARE_EVENT_CLASS(sched_stat_template,
+DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
@@ -363,12 +371,11 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
(unsigned long long)__entry->delay)
);
-
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
-DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
@@ -376,7 +383,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
-DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
@@ -384,14 +391,14 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
-DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting blocked time (time the task is in uninterruptible).
*/
-DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
--
1.8.3.1
When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as
do-nothing macros.
We'd better make them inline functions that take proper arguments.
As RCU_TRACE() is also defined as a do-nothing macro when
CONFIG_RCU_TRACE is not set, we can clean it up as well.
Signed-off-by: Yafang Shao <[email protected]>
---
include/trace/events/rcu.h | 81 ++++++++++++++--------------------------------
kernel/rcu/rcu.h | 9 ++----
kernel/rcu/tree.c | 8 ++---
3 files changed, 31 insertions(+), 67 deletions(-)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index f0c4d10..e3f357b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -7,6 +7,12 @@
#include <linux/tracepoint.h>
+#ifdef CONFIG_RCU_TRACE
+#define TRACE_EVENT_RCU TRACE_EVENT
+#else
+#define TRACE_EVENT_RCU TRACE_EVENT_NOP
+#endif
+
/*
* Tracepoint for start/end markers used for utilization calculations.
* By convention, the string is of the following forms:
@@ -35,8 +41,6 @@
TP_printk("%s", __entry->s)
);
-#ifdef CONFIG_RCU_TRACE
-
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
/*
@@ -62,7 +66,7 @@
* "end": End a grace period.
* "cpuend": CPU first notices a grace-period end.
*/
-TRACE_EVENT(rcu_grace_period,
+TRACE_EVENT_RCU(rcu_grace_period,
TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
@@ -101,7 +105,7 @@
* "Cleanup": Clean up rcu_node structure after previous GP.
* "CleanupMore": Clean up, and another GP is needed.
*/
-TRACE_EVENT(rcu_future_grace_period,
+TRACE_EVENT_RCU(rcu_future_grace_period,
TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long gp_seq_req, u8 level, int grplo, int grphi,
@@ -141,7 +145,7 @@
* rcu_node structure, and the mask of CPUs that will be waited for.
* All but the type of RCU are extracted from the rcu_node structure.
*/
-TRACE_EVENT(rcu_grace_period_init,
+TRACE_EVENT_RCU(rcu_grace_period_init,
TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
int grplo, int grphi, unsigned long qsmask),
@@ -186,7 +190,7 @@
* "endwake": Woke piggybackers up.
* "done": Someone else did the expedited grace period for us.
*/
-TRACE_EVENT(rcu_exp_grace_period,
+TRACE_EVENT_RCU(rcu_exp_grace_period,
TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
@@ -218,7 +222,7 @@
* "nxtlvl": Advance to next level of rcu_node funnel
* "wait": Wait for someone else to do expedited GP
*/
-TRACE_EVENT(rcu_exp_funnel_lock,
+TRACE_EVENT_RCU(rcu_exp_funnel_lock,
TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
const char *gpevent),
@@ -269,7 +273,7 @@
* "WaitQueue": Enqueue partially done, timed wait for it to complete.
* "WokeQueue": Partial enqueue now complete.
*/
-TRACE_EVENT(rcu_nocb_wake,
+TRACE_EVENT_RCU(rcu_nocb_wake,
TP_PROTO(const char *rcuname, int cpu, const char *reason),
@@ -297,7 +301,7 @@
* include SRCU), the grace-period number that the task is blocking
* (the current or the next), and the task's PID.
*/
-TRACE_EVENT(rcu_preempt_task,
+TRACE_EVENT_RCU(rcu_preempt_task,
TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
@@ -324,7 +328,7 @@
* read-side critical section exiting that critical section. Track the
* type of RCU (which one day might include SRCU) and the task's PID.
*/
-TRACE_EVENT(rcu_unlock_preempted_task,
+TRACE_EVENT_RCU(rcu_unlock_preempted_task,
TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
@@ -353,7 +357,7 @@
* whether there are any blocked tasks blocking the current grace period.
* All but the type of RCU are extracted from the rcu_node structure.
*/
-TRACE_EVENT(rcu_quiescent_state_report,
+TRACE_EVENT_RCU(rcu_quiescent_state_report,
TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long mask, unsigned long qsmask,
@@ -396,7 +400,7 @@
* state, which can be "dti" for dyntick-idle mode or "kick" when kicking
* a CPU that has been in dyntick-idle mode for too long.
*/
-TRACE_EVENT(rcu_fqs,
+TRACE_EVENT_RCU(rcu_fqs,
TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
@@ -436,7 +440,7 @@
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_dyntick,
TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
@@ -468,7 +472,7 @@
* number of lazy callbacks queued, and the fourth element is the
* total number of callbacks queued.
*/
-TRACE_EVENT(rcu_callback,
+TRACE_EVENT_RCU(rcu_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
long qlen),
@@ -504,7 +508,7 @@
* the fourth argument is the number of lazy callbacks queued, and the
* fifth argument is the total number of callbacks queued.
*/
-TRACE_EVENT(rcu_kfree_callback,
+TRACE_EVENT_RCU(rcu_kfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen_lazy, long qlen),
@@ -539,7 +543,7 @@
* the total number of callbacks queued, and the fourth argument is
* the current RCU-callback batch limit.
*/
-TRACE_EVENT(rcu_batch_start,
+TRACE_EVENT_RCU(rcu_batch_start,
TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
@@ -569,7 +573,7 @@
* The first argument is the type of RCU, and the second argument is
* a pointer to the RCU callback itself.
*/
-TRACE_EVENT(rcu_invoke_callback,
+TRACE_EVENT_RCU(rcu_invoke_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp),
@@ -598,7 +602,7 @@
* is the offset of the callback within the enclosing RCU-protected
* data structure.
*/
-TRACE_EVENT(rcu_invoke_kfree_callback,
+TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
@@ -631,7 +635,7 @@
* and the sixth argument (risk) is the return value from
* rcu_is_callbacks_kthread().
*/
-TRACE_EVENT(rcu_batch_end,
+TRACE_EVENT_RCU(rcu_batch_end,
TP_PROTO(const char *rcuname, int callbacks_invoked,
char cb, char nr, char iit, char risk),
@@ -673,7 +677,7 @@
* callback address can be NULL.
*/
#define RCUTORTURENAME_LEN 8
-TRACE_EVENT(rcu_torture_read,
+TRACE_EVENT_RCU(rcu_torture_read,
TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs, unsigned long c_old, unsigned long c),
@@ -721,7 +725,7 @@
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
-TRACE_EVENT(rcu_barrier,
+TRACE_EVENT_RCU(rcu_barrier,
TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
@@ -748,41 +752,6 @@
__entry->done)
);
-#else /* #ifdef CONFIG_RCU_TRACE */
-
-#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
- level, grplo, grphi, event) \
- do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
- qsmask) do { } while (0)
-#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
- do { } while (0)
-#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
- do { } while (0)
-#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
- grplo, grphi, gp_tasks) do { } \
- while (0)
-#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
- do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
- do { } while (0)
-#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
-#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
- do { } while (0)
-#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
- do { } while (0)
-#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
-
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
#endif /* _TRACE_RCU_H */
/* This part must be outside protection */
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index a393e24..2778e44 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -24,11 +24,6 @@
#define __LINUX_RCU_H
#include <trace/events/rcu.h>
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(stmt) stmt
-#else /* #ifdef CONFIG_RCU_TRACE */
-#define RCU_TRACE(stmt)
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
@@ -229,12 +224,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
rcu_lock_acquire(&rcu_callback_map);
if (__is_kfree_rcu_offset(offset)) {
- RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
+ trace_rcu_invoke_kfree_callback(rn, head, offset);
kfree((void *)head - offset);
rcu_lock_release(&rcu_callback_map);
return true;
} else {
- RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
+ trace_rcu_invoke_callback(rn, head);
f = head->func;
WRITE_ONCE(head->func, (rcu_callback_t)0L);
f(head);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9180158..d2ad39f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2329,14 +2329,14 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
*/
int rcutree_dying_cpu(unsigned int cpu)
{
- RCU_TRACE(bool blkd;)
- RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
- RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
+ bool blkd;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ struct rcu_node *rnp = rdp->mynode;
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return 0;
- RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+ blkd = !!(rnp->qsmask & rdp->grpmask);
trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
return 0;
--
1.8.3.1
Peter, Ingo,
Are you OK with this patch? If you ack it, I'll pull it in through my
tree.
-- Steve
On Tue, 26 Mar 2019 20:13:10 +0800
Yafang Shao <[email protected]> wrote:
> The tracepoints trace_sched_stat_{iowait, blocked, wait, sleep} should
> be not exposed to user if CONFIG_SCHEDSTATS is not set.
>
> Signed-off-by: Yafang Shao <[email protected]>
> ---
> include/trace/events/sched.h | 21 ++++++++++++++-------
> 1 file changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
> index 9a4bdfa..c8c7c7e 100644
> --- a/include/trace/events/sched.h
> +++ b/include/trace/events/sched.h
> @@ -241,7 +241,6 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
> DEFINE_EVENT(sched_process_template, sched_process_free,
> TP_PROTO(struct task_struct *p),
> TP_ARGS(p));
> -
>
> /*
> * Tracepoint for a task exiting:
> @@ -336,11 +335,20 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
> __entry->pid, __entry->old_pid)
> );
>
> +
> +#ifdef CONFIG_SCHEDSTATS
> +#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
> +#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
> +#else
> +#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
> +#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
> +#endif
> +
> /*
> * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
> * adding sched_stat support to SCHED_FIFO/RR would be welcome.
> */
> -DECLARE_EVENT_CLASS(sched_stat_template,
> +DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
>
> TP_PROTO(struct task_struct *tsk, u64 delay),
>
> @@ -363,12 +371,11 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
> (unsigned long long)__entry->delay)
> );
>
> -
> /*
> * Tracepoint for accounting wait time (time the task is runnable
> * but not actually running due to scheduler contention).
> */
> -DEFINE_EVENT(sched_stat_template, sched_stat_wait,
> +DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
> TP_PROTO(struct task_struct *tsk, u64 delay),
> TP_ARGS(tsk, delay));
>
> @@ -376,7 +383,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
> * Tracepoint for accounting sleep time (time the task is not runnable,
> * including iowait, see below).
> */
> -DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
> +DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
> TP_PROTO(struct task_struct *tsk, u64 delay),
> TP_ARGS(tsk, delay));
>
> @@ -384,14 +391,14 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
> * Tracepoint for accounting iowait time (time the task is not runnable
> * due to waiting on IO to complete).
> */
> -DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
> +DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
> TP_PROTO(struct task_struct *tsk, u64 delay),
> TP_ARGS(tsk, delay));
>
> /*
> * Tracepoint for accounting blocked time (time the task is in uninterruptible).
> */
> -DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
> +DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
> TP_PROTO(struct task_struct *tsk, u64 delay),
> TP_ARGS(tsk, delay));
>
Paul,
Are you OK with this patch? If you ack it, I'll pull it in through my
tree.
-- Steve
On Tue, 26 Mar 2019 20:13:11 +0800
Yafang Shao <[email protected]> wrote:
> When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as
> do-nothing macro.
> We'd better make those inline functions that take proper arguments.
>
> As RCU_TRACE() is defined as do-nothing marco as well when
> CONFIG_RCU_TRACE is not set, so we can clean it up.
>
> Signed-off-by: Yafang Shao <[email protected]>
> ---
> include/trace/events/rcu.h | 81 ++++++++++++++--------------------------------
> kernel/rcu/rcu.h | 9 ++----
> kernel/rcu/tree.c | 8 ++---
> 3 files changed, 31 insertions(+), 67 deletions(-)
>
> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
> index f0c4d10..e3f357b 100644
> --- a/include/trace/events/rcu.h
> +++ b/include/trace/events/rcu.h
> @@ -7,6 +7,12 @@
>
> #include <linux/tracepoint.h>
>
> +#ifdef CONFIG_RCU_TRACE
> +#define TRACE_EVENT_RCU TRACE_EVENT
> +#else
> +#define TRACE_EVENT_RCU TRACE_EVENT_NOP
> +#endif
> +
> /*
> * Tracepoint for start/end markers used for utilization calculations.
> * By convention, the string is of the following forms:
> @@ -35,8 +41,6 @@
> TP_printk("%s", __entry->s)
> );
>
> -#ifdef CONFIG_RCU_TRACE
> -
> #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
>
> /*
> @@ -62,7 +66,7 @@
> * "end": End a grace period.
> * "cpuend": CPU first notices a grace-period end.
> */
> -TRACE_EVENT(rcu_grace_period,
> +TRACE_EVENT_RCU(rcu_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
>
> @@ -101,7 +105,7 @@
> * "Cleanup": Clean up rcu_node structure after previous GP.
> * "CleanupMore": Clean up, and another GP is needed.
> */
> -TRACE_EVENT(rcu_future_grace_period,
> +TRACE_EVENT_RCU(rcu_future_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq,
> unsigned long gp_seq_req, u8 level, int grplo, int grphi,
> @@ -141,7 +145,7 @@
> * rcu_node structure, and the mask of CPUs that will be waited for.
> * All but the type of RCU are extracted from the rcu_node structure.
> */
> -TRACE_EVENT(rcu_grace_period_init,
> +TRACE_EVENT_RCU(rcu_grace_period_init,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
> int grplo, int grphi, unsigned long qsmask),
> @@ -186,7 +190,7 @@
> * "endwake": Woke piggybackers up.
> * "done": Someone else did the expedited grace period for us.
> */
> -TRACE_EVENT(rcu_exp_grace_period,
> +TRACE_EVENT_RCU(rcu_exp_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
>
> @@ -218,7 +222,7 @@
> * "nxtlvl": Advance to next level of rcu_node funnel
> * "wait": Wait for someone else to do expedited GP
> */
> -TRACE_EVENT(rcu_exp_funnel_lock,
> +TRACE_EVENT_RCU(rcu_exp_funnel_lock,
>
> TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
> const char *gpevent),
> @@ -269,7 +273,7 @@
> * "WaitQueue": Enqueue partially done, timed wait for it to complete.
> * "WokeQueue": Partial enqueue now complete.
> */
> -TRACE_EVENT(rcu_nocb_wake,
> +TRACE_EVENT_RCU(rcu_nocb_wake,
>
> TP_PROTO(const char *rcuname, int cpu, const char *reason),
>
> @@ -297,7 +301,7 @@
> * include SRCU), the grace-period number that the task is blocking
> * (the current or the next), and the task's PID.
> */
> -TRACE_EVENT(rcu_preempt_task,
> +TRACE_EVENT_RCU(rcu_preempt_task,
>
> TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
>
> @@ -324,7 +328,7 @@
> * read-side critical section exiting that critical section. Track the
> * type of RCU (which one day might include SRCU) and the task's PID.
> */
> -TRACE_EVENT(rcu_unlock_preempted_task,
> +TRACE_EVENT_RCU(rcu_unlock_preempted_task,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
>
> @@ -353,7 +357,7 @@
> * whether there are any blocked tasks blocking the current grace period.
> * All but the type of RCU are extracted from the rcu_node structure.
> */
> -TRACE_EVENT(rcu_quiescent_state_report,
> +TRACE_EVENT_RCU(rcu_quiescent_state_report,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq,
> unsigned long mask, unsigned long qsmask,
> @@ -396,7 +400,7 @@
> * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
> * a CPU that has been in dyntick-idle mode for too long.
> */
> -TRACE_EVENT(rcu_fqs,
> +TRACE_EVENT_RCU(rcu_fqs,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
>
> @@ -436,7 +440,7 @@
> * events use two separate counters, and that the "++=" and "--=" events
> * for irq/NMI will change the counter by two, otherwise by one.
> */
> -TRACE_EVENT(rcu_dyntick,
> +TRACE_EVENT_RCU(rcu_dyntick,
>
> TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
>
> @@ -468,7 +472,7 @@
> * number of lazy callbacks queued, and the fourth element is the
> * total number of callbacks queued.
> */
> -TRACE_EVENT(rcu_callback,
> +TRACE_EVENT_RCU(rcu_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
> long qlen),
> @@ -504,7 +508,7 @@
> * the fourth argument is the number of lazy callbacks queued, and the
> * fifth argument is the total number of callbacks queued.
> */
> -TRACE_EVENT(rcu_kfree_callback,
> +TRACE_EVENT_RCU(rcu_kfree_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
> long qlen_lazy, long qlen),
> @@ -539,7 +543,7 @@
> * the total number of callbacks queued, and the fourth argument is
> * the current RCU-callback batch limit.
> */
> -TRACE_EVENT(rcu_batch_start,
> +TRACE_EVENT_RCU(rcu_batch_start,
>
> TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
>
> @@ -569,7 +573,7 @@
> * The first argument is the type of RCU, and the second argument is
> * a pointer to the RCU callback itself.
> */
> -TRACE_EVENT(rcu_invoke_callback,
> +TRACE_EVENT_RCU(rcu_invoke_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp),
>
> @@ -598,7 +602,7 @@
> * is the offset of the callback within the enclosing RCU-protected
> * data structure.
> */
> -TRACE_EVENT(rcu_invoke_kfree_callback,
> +TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
>
> @@ -631,7 +635,7 @@
> * and the sixth argument (risk) is the return value from
> * rcu_is_callbacks_kthread().
> */
> -TRACE_EVENT(rcu_batch_end,
> +TRACE_EVENT_RCU(rcu_batch_end,
>
> TP_PROTO(const char *rcuname, int callbacks_invoked,
> char cb, char nr, char iit, char risk),
> @@ -673,7 +677,7 @@
> * callback address can be NULL.
> */
> #define RCUTORTURENAME_LEN 8
> -TRACE_EVENT(rcu_torture_read,
> +TRACE_EVENT_RCU(rcu_torture_read,
>
> TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
> unsigned long secs, unsigned long c_old, unsigned long c),
> @@ -721,7 +725,7 @@
> * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
> * is the count of remaining callbacks, and "done" is the piggybacking count.
> */
> -TRACE_EVENT(rcu_barrier,
> +TRACE_EVENT_RCU(rcu_barrier,
>
> TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
>
> @@ -748,41 +752,6 @@
> __entry->done)
> );
>
> -#else /* #ifdef CONFIG_RCU_TRACE */
> -
> -#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
> -#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
> - level, grplo, grphi, event) \
> - do { } while (0)
> -#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
> - qsmask) do { } while (0)
> -#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
> - do { } while (0)
> -#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
> - do { } while (0)
> -#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
> -#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
> -#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
> -#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
> - grplo, grphi, gp_tasks) do { } \
> - while (0)
> -#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
> -#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
> -#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
> -#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
> - do { } while (0)
> -#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
> - do { } while (0)
> -#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
> -#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
> -#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
> - do { } while (0)
> -#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
> - do { } while (0)
> -#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
> -
> -#endif /* #else #ifdef CONFIG_RCU_TRACE */
> -
> #endif /* _TRACE_RCU_H */
>
> /* This part must be outside protection */
> diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
> index a393e24..2778e44 100644
> --- a/kernel/rcu/rcu.h
> +++ b/kernel/rcu/rcu.h
> @@ -24,11 +24,6 @@
> #define __LINUX_RCU_H
>
> #include <trace/events/rcu.h>
> -#ifdef CONFIG_RCU_TRACE
> -#define RCU_TRACE(stmt) stmt
> -#else /* #ifdef CONFIG_RCU_TRACE */
> -#define RCU_TRACE(stmt)
> -#endif /* #else #ifdef CONFIG_RCU_TRACE */
>
> /* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
> #define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
> @@ -229,12 +224,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
>
> rcu_lock_acquire(&rcu_callback_map);
> if (__is_kfree_rcu_offset(offset)) {
> - RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
> + trace_rcu_invoke_kfree_callback(rn, head, offset);
> kfree((void *)head - offset);
> rcu_lock_release(&rcu_callback_map);
> return true;
> } else {
> - RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
> + trace_rcu_invoke_callback(rn, head);
> f = head->func;
> WRITE_ONCE(head->func, (rcu_callback_t)0L);
> f(head);
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9180158..d2ad39f 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2329,14 +2329,14 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
> */
> int rcutree_dying_cpu(unsigned int cpu)
> {
> - RCU_TRACE(bool blkd;)
> - RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
> - RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
> + bool blkd;
> + struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
> + struct rcu_node *rnp = rdp->mynode;
>
> if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
> return 0;
>
> - RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
> + blkd = !!(rnp->qsmask & rdp->grpmask);
> trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
> blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
> return 0;
On Tue, Mar 26, 2019 at 08:13:11PM +0800, Yafang Shao wrote:
> When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as
> do-nothing macro.
> We'd better make those inline functions that take proper arguments.
>
> As RCU_TRACE() is defined as do-nothing marco as well when
> CONFIG_RCU_TRACE is not set, so we can clean it up.
How about this for the commit log?
Unless the CONFIG_RCU_TRACE kconfig option is set, almost all
of RCU's tracepoints are defined as empty macros. It would
be better if these tracepoints could instead be empty inline
functions with proper arguments and type checking. It would
also be good to get rid of the RCU_TRACE() macro, which
compiles its argument in CONFIG_RCU_TRACE=y kernels and
omits them otherwise.
This commit therefore creates a TRACE_EVENT_RCU macro that
is defined as TRACE_EVENT in CONFIG_RCU_TRACE=y kernels and
as the new TRACE_EVENT_NOP otherwise, which allows the
empty macros and the RCU_TRACE() macro to be eliminated.
With that:
Reviewed-by: Paul E. McKenney <[email protected]>
> Signed-off-by: Yafang Shao <[email protected]>
> ---
> include/trace/events/rcu.h | 81 ++++++++++++++--------------------------------
> kernel/rcu/rcu.h | 9 ++----
> kernel/rcu/tree.c | 8 ++---
> 3 files changed, 31 insertions(+), 67 deletions(-)
>
> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
> index f0c4d10..e3f357b 100644
> --- a/include/trace/events/rcu.h
> +++ b/include/trace/events/rcu.h
> @@ -7,6 +7,12 @@
>
> #include <linux/tracepoint.h>
>
> +#ifdef CONFIG_RCU_TRACE
> +#define TRACE_EVENT_RCU TRACE_EVENT
> +#else
> +#define TRACE_EVENT_RCU TRACE_EVENT_NOP
> +#endif
> +
> /*
> * Tracepoint for start/end markers used for utilization calculations.
> * By convention, the string is of the following forms:
> @@ -35,8 +41,6 @@
> TP_printk("%s", __entry->s)
> );
>
> -#ifdef CONFIG_RCU_TRACE
> -
> #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
>
> /*
> @@ -62,7 +66,7 @@
> * "end": End a grace period.
> * "cpuend": CPU first notices a grace-period end.
> */
> -TRACE_EVENT(rcu_grace_period,
> +TRACE_EVENT_RCU(rcu_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
>
> @@ -101,7 +105,7 @@
> * "Cleanup": Clean up rcu_node structure after previous GP.
> * "CleanupMore": Clean up, and another GP is needed.
> */
> -TRACE_EVENT(rcu_future_grace_period,
> +TRACE_EVENT_RCU(rcu_future_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq,
> unsigned long gp_seq_req, u8 level, int grplo, int grphi,
> @@ -141,7 +145,7 @@
> * rcu_node structure, and the mask of CPUs that will be waited for.
> * All but the type of RCU are extracted from the rcu_node structure.
> */
> -TRACE_EVENT(rcu_grace_period_init,
> +TRACE_EVENT_RCU(rcu_grace_period_init,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
> int grplo, int grphi, unsigned long qsmask),
> @@ -186,7 +190,7 @@
> * "endwake": Woke piggybackers up.
> * "done": Someone else did the expedited grace period for us.
> */
> -TRACE_EVENT(rcu_exp_grace_period,
> +TRACE_EVENT_RCU(rcu_exp_grace_period,
>
> TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
>
> @@ -218,7 +222,7 @@
> * "nxtlvl": Advance to next level of rcu_node funnel
> * "wait": Wait for someone else to do expedited GP
> */
> -TRACE_EVENT(rcu_exp_funnel_lock,
> +TRACE_EVENT_RCU(rcu_exp_funnel_lock,
>
> TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
> const char *gpevent),
> @@ -269,7 +273,7 @@
> * "WaitQueue": Enqueue partially done, timed wait for it to complete.
> * "WokeQueue": Partial enqueue now complete.
> */
> -TRACE_EVENT(rcu_nocb_wake,
> +TRACE_EVENT_RCU(rcu_nocb_wake,
>
> TP_PROTO(const char *rcuname, int cpu, const char *reason),
>
> @@ -297,7 +301,7 @@
> * include SRCU), the grace-period number that the task is blocking
> * (the current or the next), and the task's PID.
> */
> -TRACE_EVENT(rcu_preempt_task,
> +TRACE_EVENT_RCU(rcu_preempt_task,
>
> TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
>
> @@ -324,7 +328,7 @@
> * read-side critical section exiting that critical section. Track the
> * type of RCU (which one day might include SRCU) and the task's PID.
> */
> -TRACE_EVENT(rcu_unlock_preempted_task,
> +TRACE_EVENT_RCU(rcu_unlock_preempted_task,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
>
> @@ -353,7 +357,7 @@
> * whether there are any blocked tasks blocking the current grace period.
> * All but the type of RCU are extracted from the rcu_node structure.
> */
> -TRACE_EVENT(rcu_quiescent_state_report,
> +TRACE_EVENT_RCU(rcu_quiescent_state_report,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq,
> unsigned long mask, unsigned long qsmask,
> @@ -396,7 +400,7 @@
> * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
> * a CPU that has been in dyntick-idle mode for too long.
> */
> -TRACE_EVENT(rcu_fqs,
> +TRACE_EVENT_RCU(rcu_fqs,
>
> TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
>
> @@ -436,7 +440,7 @@
> * events use two separate counters, and that the "++=" and "--=" events
> * for irq/NMI will change the counter by two, otherwise by one.
> */
> -TRACE_EVENT(rcu_dyntick,
> +TRACE_EVENT_RCU(rcu_dyntick,
>
> TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
>
> @@ -468,7 +472,7 @@
> * number of lazy callbacks queued, and the fourth element is the
> * total number of callbacks queued.
> */
> -TRACE_EVENT(rcu_callback,
> +TRACE_EVENT_RCU(rcu_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
> long qlen),
> @@ -504,7 +508,7 @@
> * the fourth argument is the number of lazy callbacks queued, and the
> * fifth argument is the total number of callbacks queued.
> */
> -TRACE_EVENT(rcu_kfree_callback,
> +TRACE_EVENT_RCU(rcu_kfree_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
> long qlen_lazy, long qlen),
> @@ -539,7 +543,7 @@
> * the total number of callbacks queued, and the fourth argument is
> * the current RCU-callback batch limit.
> */
> -TRACE_EVENT(rcu_batch_start,
> +TRACE_EVENT_RCU(rcu_batch_start,
>
> TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
>
> @@ -569,7 +573,7 @@
> * The first argument is the type of RCU, and the second argument is
> * a pointer to the RCU callback itself.
> */
> -TRACE_EVENT(rcu_invoke_callback,
> +TRACE_EVENT_RCU(rcu_invoke_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp),
>
> @@ -598,7 +602,7 @@
> * is the offset of the callback within the enclosing RCU-protected
> * data structure.
> */
> -TRACE_EVENT(rcu_invoke_kfree_callback,
> +TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
>
> TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
>
> @@ -631,7 +635,7 @@
> * and the sixth argument (risk) is the return value from
> * rcu_is_callbacks_kthread().
> */
> -TRACE_EVENT(rcu_batch_end,
> +TRACE_EVENT_RCU(rcu_batch_end,
>
> TP_PROTO(const char *rcuname, int callbacks_invoked,
> char cb, char nr, char iit, char risk),
> @@ -673,7 +677,7 @@
> * callback address can be NULL.
> */
> #define RCUTORTURENAME_LEN 8
> -TRACE_EVENT(rcu_torture_read,
> +TRACE_EVENT_RCU(rcu_torture_read,
>
> TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
> unsigned long secs, unsigned long c_old, unsigned long c),
> @@ -721,7 +725,7 @@
> * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
> * is the count of remaining callbacks, and "done" is the piggybacking count.
> */
> -TRACE_EVENT(rcu_barrier,
> +TRACE_EVENT_RCU(rcu_barrier,
>
> TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
>
> @@ -748,41 +752,6 @@
> __entry->done)
> );
>
> -#else /* #ifdef CONFIG_RCU_TRACE */
> -
> -#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
> -#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
> - level, grplo, grphi, event) \
> - do { } while (0)
> -#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
> - qsmask) do { } while (0)
> -#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
> - do { } while (0)
> -#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
> - do { } while (0)
> -#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
> -#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
> -#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
> -#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
> - grplo, grphi, gp_tasks) do { } \
> - while (0)
> -#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
> -#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
> -#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
> -#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
> - do { } while (0)
> -#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
> - do { } while (0)
> -#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
> -#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
> -#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
> - do { } while (0)
> -#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
> - do { } while (0)
> -#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
> -
> -#endif /* #else #ifdef CONFIG_RCU_TRACE */
> -
> #endif /* _TRACE_RCU_H */
>
> /* This part must be outside protection */
> diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
> index a393e24..2778e44 100644
> --- a/kernel/rcu/rcu.h
> +++ b/kernel/rcu/rcu.h
> @@ -24,11 +24,6 @@
> #define __LINUX_RCU_H
>
> #include <trace/events/rcu.h>
> -#ifdef CONFIG_RCU_TRACE
> -#define RCU_TRACE(stmt) stmt
> -#else /* #ifdef CONFIG_RCU_TRACE */
> -#define RCU_TRACE(stmt)
> -#endif /* #else #ifdef CONFIG_RCU_TRACE */
>
> /* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
> #define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
> @@ -229,12 +224,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
>
> rcu_lock_acquire(&rcu_callback_map);
> if (__is_kfree_rcu_offset(offset)) {
> - RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
> + trace_rcu_invoke_kfree_callback(rn, head, offset);
> kfree((void *)head - offset);
> rcu_lock_release(&rcu_callback_map);
> return true;
> } else {
> - RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
> + trace_rcu_invoke_callback(rn, head);
> f = head->func;
> WRITE_ONCE(head->func, (rcu_callback_t)0L);
> f(head);
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9180158..d2ad39f 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2329,14 +2329,14 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
> */
> int rcutree_dying_cpu(unsigned int cpu)
> {
> - RCU_TRACE(bool blkd;)
> - RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
> - RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
> + bool blkd;
> + struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
> + struct rcu_node *rnp = rdp->mynode;
>
> if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
> return 0;
>
> - RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
> + blkd = !!(rnp->qsmask & rdp->grpmask);
> trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
> blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
> return 0;
> --
> 1.8.3.1
>
On Tue, 26 Mar 2019 08:18:15 -0700
"Paul E. McKenney" <[email protected]> wrote:
> On Tue, Mar 26, 2019 at 08:13:11PM +0800, Yafang Shao wrote:
> > When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as
> > do-nothing macros.
> > We'd better make those inline functions that take proper arguments.
> >
> > As RCU_TRACE() is defined as a do-nothing macro as well when
> > CONFIG_RCU_TRACE is not set, we can clean it up.
>
> How about this for the commit log?
>
> Unless the CONFIG_RCU_TRACE kconfig option is set, almost all
> of RCU's tracepoints are defined as empty macros. It would
> be better if these tracepoints could instead be empty inline
> functions with proper arguments and type checking. It would
> also be good to get rid of the RCU_TRACE() macro, which
> compiles its argument in CONFIG_RCU_TRACE=y kernels and
> omits it otherwise.
>
> This commit therefore creates a TRACE_EVENT_RCU macro that
> is defined as TRACE_EVENT in CONFIG_RCU_TRACE=y kernels and
> as the new TRACE_EVENT_NOP otherwise, which allows the
> empty macros and the RCU_TRACE() macro to be eliminated.
>
> With that:
>
> Reviewed-by: Paul E. McKenney <[email protected]>
Yafang,
If you are OK with the above changes, I'll take this patch with the
updated change log.
-- Steve
On Tue, Mar 26, 2019 at 11:30 PM Steven Rostedt <[email protected]> wrote:
>
> On Tue, 26 Mar 2019 08:18:15 -0700
> "Paul E. McKenney" <[email protected]> wrote:
>
> > On Tue, Mar 26, 2019 at 08:13:11PM +0800, Yafang Shao wrote:
> > > When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as
> > > do-nothing macros.
> > > We'd better make those inline functions that take proper arguments.
> > >
> > > As RCU_TRACE() is defined as a do-nothing macro as well when
> > > CONFIG_RCU_TRACE is not set, we can clean it up.
> >
> > How about this for the commit log?
> >
> > Unless the CONFIG_RCU_TRACE kconfig option is set, almost all
> > of RCU's tracepoints are defined as empty macros. It would
> > be better if these tracepoints could instead be empty inline
> > functions with proper arguments and type checking. It would
> > also be good to get rid of the RCU_TRACE() macro, which
> > compiles its argument in CONFIG_RCU_TRACE=y kernels and
> > omits it otherwise.
> >
> > This commit therefore creates a TRACE_EVENT_RCU macro that
> > is defined as TRACE_EVENT in CONFIG_RCU_TRACE=y kernels and
> > as the new TRACE_EVENT_NOP otherwise, which allows the
> > empty macros and the RCU_TRACE() macro to be eliminated.
> >
> > With that:
> >
> > Reviewed-by: Paul E. McKenney <[email protected]>
>
> Yafang,
>
> If you are OK with the above changes, I'll take this patch with the
> updated change log.
>
Hi Steve,
This new changelog is OK by me.
Please take it.
Thanks
Yafang
On Tue, Mar 26, 2019 at 08:21:57AM -0400, Steven Rostedt wrote:
> Peter, Ingo,
>
> Are you OK with this patch? If you ack it, I'll pull it in through my
Sure,
Acked-by: Peter Zijlstra (Intel) <[email protected]>