Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752813AbdDLFjG (ORCPT ); Wed, 12 Apr 2017 01:39:06 -0400 Received: from mail-pf0-f174.google.com ([209.85.192.174]:34791 "EHLO mail-pf0-f174.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751181AbdDLFjD (ORCPT ); Wed, 12 Apr 2017 01:39:03 -0400 From: Joel Fernandes To: linux-kernel@vger.kernel.org Cc: Joel Fernandes , Steven Rostedt Subject: [RFC] tracing: Add support for critical section event tracing Date: Tue, 11 Apr 2017 22:38:51 -0700 Message-Id: <20170412053851.26286-1-joelaf@google.com> X-Mailer: git-send-email 2.12.2.715.g7642488e1d-goog Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 7238 Lines: 255 Critical section trace events can be used for tracing the start and end of a critical section which can be used by a trace viewer like systrace to graphically view the start and end of a critical section and correlate them with latencies and scheduling issues. Reason for starting critical section: Either preemption or interrupts were turned off. Reason for ending critical section: Both preemption and interrupts are now turned back on. 
Cc: Steven Rostedt Signed-off-by: Joel Fernandes -- Here's an example of how Android's systrace will be using it to show critical sections as a gantt chart: http://imgur.com/download/TZplEVp --- include/trace/events/critical.h | 55 +++++++++++++++++++++++++++++++ kernel/trace/Kconfig | 6 ++++ kernel/trace/trace_irqsoff.c | 73 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+) create mode 100644 include/trace/events/critical.h diff --git a/include/trace/events/critical.h b/include/trace/events/critical.h new file mode 100644 index 000000000000..bfd58dd4f48f --- /dev/null +++ b/include/trace/events/critical.h @@ -0,0 +1,55 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM critical + +#if !defined(_TRACE_CRITICAL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CRITICAL_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> +#include <linux/string.h> + +TRACE_EVENT(critical_start, + + TP_PROTO(unsigned long ip, unsigned long parent_ip), + + TP_ARGS(ip, parent_ip), + + TP_STRUCT__entry( + __array(char, func, 16) + __array(char, parent, 16) + ), + + TP_fast_assign( + snprintf(__entry->func, 16, "%pf", (void *)ip); + snprintf(__entry->parent, 16, "%pf", (void *)parent_ip); + ), + + TP_printk("caller=%s parent=%s\n", + __entry->func, + __entry->parent) +); + +TRACE_EVENT(critical_end, + + TP_PROTO(unsigned long ip, unsigned long parent_ip), + + TP_ARGS(ip, parent_ip), + + TP_STRUCT__entry( + __array(char, func, 16) + __array(char, parent, 16) + ), + + TP_fast_assign( + snprintf(__entry->func, 16, "%pf", (void *)ip); + snprintf(__entry->parent, 16, "%pf", (void *)parent_ip); + ), + + TP_printk("caller=%s parent=%s\n", + __entry->func, + __entry->parent) +); +#endif /* _TRACE_CRITICAL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d5038005eb5d..986a07985360 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -208,6 +208,12 @@ config PREEMPT_TRACER enabled.
This option and the irqs-off timing option can be used together or separately.) +config TRACE_CRITICAL_SECTION_EVENTS + bool "Trace critical sections as trace events" + default n + select IRQSOFF_TRACER + select PREEMPT_TRACER + config SCHED_TRACER bool "Scheduling Latency Tracer" select GENERIC_TRACER diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7758bc0617cb..a14ce6185885 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -13,6 +13,8 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> +#define CREATE_TRACE_POINTS +#include <trace/events/critical.h> #include "trace.h" @@ -20,6 +22,7 @@ static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; static DEFINE_PER_CPU(int, tracing_cpu); +static DEFINE_PER_CPU(int, tracing_events_cpu); static DEFINE_RAW_SPINLOCK(max_trace_lock); @@ -362,6 +365,44 @@ check_critical_timing(struct trace_array *tr, __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); } +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS +/* + * Called when either preempt or irq are turned off + */ +static inline void +start_critical_event(unsigned long ip, unsigned long parent_ip) +{ + int cpu; + + cpu = raw_smp_processor_id(); + + if (per_cpu(tracing_events_cpu, cpu)) + return; + + trace_critical_start(ip, parent_ip); + + per_cpu(tracing_events_cpu, cpu) = 1; +} + +/* + * Called when both preempt and irq are turned back on + */ +static inline void +stop_critical_event(unsigned long ip, unsigned long parent_ip) +{ + int cpu; + + cpu = raw_smp_processor_id(); + + if (unlikely(per_cpu(tracing_events_cpu, cpu))) + per_cpu(tracing_events_cpu, cpu) = 0; + else + return; + + trace_critical_end(ip, parent_ip); +} +#endif + static inline void start_critical_timing(unsigned long ip, unsigned long parent_ip) { @@ -434,6 +475,10 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) /* start and stop critical timings used to for stoppage (in idle) */ void start_critical_timings(void) { +#ifdef 
CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (irqs_disabled() || preempt_count()) + start_critical_event(CALLER_ADDR0, CALLER_ADDR1); +#endif if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -441,6 +486,10 @@ EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (irqs_disabled() || preempt_count()) + stop_critical_event(CALLER_ADDR0, CALLER_ADDR1); +#endif if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -483,6 +532,10 @@ inline void print_irqtrace_events(struct task_struct *curr) */ void trace_hardirqs_on(void) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!preempt_count()) + stop_critical_event(CALLER_ADDR0, CALLER_ADDR1); +#endif if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -490,6 +543,10 @@ EXPORT_SYMBOL(trace_hardirqs_on); void trace_hardirqs_off(void) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!preempt_count()) + start_critical_event(CALLER_ADDR0, CALLER_ADDR1); +#endif if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -497,6 +554,10 @@ EXPORT_SYMBOL(trace_hardirqs_off); __visible void trace_hardirqs_on_caller(unsigned long caller_addr) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!preempt_count()) + stop_critical_event(CALLER_ADDR0, caller_addr); +#endif if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } @@ -504,6 +565,10 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!preempt_count()) + start_critical_event(CALLER_ADDR0, caller_addr); +#endif if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); } @@ -515,12 +580,20 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); #ifdef CONFIG_PREEMPT_TRACER void 
trace_preempt_on(unsigned long a0, unsigned long a1) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!irqs_disabled()) + stop_critical_event(a0, a1); +#endif if (preempt_trace() && !irq_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned long a0, unsigned long a1) { +#ifdef CONFIG_TRACE_CRITICAL_SECTION_EVENTS + if (!irqs_disabled()) + start_critical_event(a0, a1); +#endif if (preempt_trace() && !irq_trace()) start_critical_timing(a0, a1); } -- 2.12.2.715.g7642488e1d-goog