2017-06-07 08:13:11

by Chunyan Zhang

[permalink] [raw]
Subject: [RESEND PATCH v2] trace: fix the errors caused by incompatible type of RCU variables

The variables which are processed by RCU functions should be annotated
as RCU, otherwise sparse will report errors like the one below:

"error: incompatible types in comparison expression (different
address spaces)"

Signed-off-by: Chunyan Zhang <[email protected]>
---

Changes in v2:
* Addressed Steven's comments
- Use rcu_dereference_protected() instead of rcu_dereference_raw_notrace()
* Rebased on v4.12-rc1

include/linux/ftrace.h | 6 +++---
include/linux/trace_events.h | 2 +-
kernel/trace/ftrace.c | 42 ++++++++++++++++++++++++++++--------------
kernel/trace/trace.h | 6 +++---
4 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 473f088..056395c7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -142,8 +142,8 @@ enum {
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
- struct ftrace_hash *notrace_hash;
- struct ftrace_hash *filter_hash;
+ struct ftrace_hash __rcu *notrace_hash;
+ struct ftrace_hash __rcu *filter_hash;
struct mutex regex_lock;
};

@@ -165,7 +165,7 @@ static inline void ftrace_free_init_mem(void) { }
*/
struct ftrace_ops {
ftrace_func_t func;
- struct ftrace_ops *next;
+ struct ftrace_ops __rcu *next;
unsigned long flags;
void *private;
ftrace_func_t saved_func;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index a556805..89f7bd5d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -327,7 +327,7 @@ enum {
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
- struct event_filter *filter;
+ struct event_filter __rcu *filter;
struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 39dca4e..116742b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,7 +113,8 @@ static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops __rcu *
+ ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

@@ -169,8 +170,11 @@ int ftrace_nr_registered_ops(void)

mutex_lock(&ftrace_lock);

- for (ops = ftrace_ops_list;
- ops != &ftrace_list_end; ops = ops->next)
+ for (ops = rcu_dereference_protected(ftrace_ops_list,
+ lockdep_is_held(&ftrace_lock));
+ ops != &ftrace_list_end;
+ ops = rcu_dereference_protected(ops->next,
+ lockdep_is_held(&ftrace_lock)))
cnt++;

mutex_unlock(&ftrace_lock);
@@ -275,10 +279,11 @@ static void update_ftrace_function(void)
* If there's only one ftrace_ops registered, the ftrace_ops_list
* will point to the ops we want.
*/
- set_function_trace_op = ftrace_ops_list;
+ set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
+ lockdep_is_held(&ftrace_lock));

/* If there's no ftrace_ops registered, just call the stub function */
- if (ftrace_ops_list == &ftrace_list_end) {
+ if (set_function_trace_op == &ftrace_list_end) {
func = ftrace_stub;

/*
@@ -286,7 +291,8 @@ static void update_ftrace_function(void)
* recursion safe and not dynamic and the arch supports passing ops,
* then have the mcount trampoline call the function directly.
*/
- } else if (ftrace_ops_list->next == &ftrace_list_end) {
+ } else if (rcu_dereference_protected(ftrace_ops_list->next,
+ lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
func = ftrace_ops_get_list_func(ftrace_ops_list);

} else {
@@ -348,9 +354,11 @@ int using_ftrace_ops_list_func(void)
return ftrace_trace_function == ftrace_ops_list_func;
}

-static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops __rcu **list,
+ struct ftrace_ops *ops)
{
- ops->next = *list;
+ rcu_assign_pointer(ops->next, *list);
+
/*
* We are entering ops into the list but another
* CPU might be walking that list. We need to make sure
@@ -360,7 +368,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
rcu_assign_pointer(*list, ops);
}

-static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
+ struct ftrace_ops *ops)
{
struct ftrace_ops **p;

@@ -368,7 +377,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
* If we are removing the last function, then simply point
* to the ftrace_stub.
*/
- if (*list == ops && ops->next == &ftrace_list_end) {
+ if (rcu_dereference_protected(*list,
+ lockdep_is_held(&ftrace_lock)) == ops &&
+ rcu_dereference_protected(ops->next,
+ lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
*list = &ftrace_list_end;
return 0;
}
@@ -1513,8 +1525,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
return 0;
#endif

- hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
- hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+ rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
+ rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

if (hash_contains_ip(ip, &hash))
ret = 1;
@@ -2784,7 +2796,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* If there's no more ops registered with ftrace, run a
* sanity check to make sure all rec flags are cleared.
*/
- if (ftrace_ops_list == &ftrace_list_end) {
+ if (rcu_dereference_protected(ftrace_ops_list,
+ lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
struct ftrace_page *pg;
struct dyn_ftrace *rec;

@@ -6122,7 +6135,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (ftrace_enabled) {

/* we are starting ftrace again */
- if (ftrace_ops_list != &ftrace_list_end)
+ if (rcu_dereference_protected(ftrace_ops_list,
+ lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
update_ftrace_function();

ftrace_startup_sysctl();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 291a1bc..60b26b4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1183,9 +1183,9 @@ struct ftrace_event_field {
struct event_filter {
int n_preds; /* Number assigned */
int a_preds; /* allocated */
- struct filter_pred *preds;
- struct filter_pred *root;
- char *filter_string;
+ struct filter_pred __rcu *preds;
+ struct filter_pred __rcu *root;
+ char *filter_string;
};

struct event_subsystem {
--
2.7.4


2017-06-07 18:37:05

by Steven Rostedt

[permalink] [raw]
Subject: Re: [RESEND PATCH v2] trace: fix the errors caused by incompatible type of RCU variables

On Wed, 7 Jun 2017 16:12:51 +0800
Chunyan Zhang <[email protected]> wrote:

> The variables which are processed by RCU functions should be annotated
> as RCU, otherwise sparse will report errors like the one below:
>
> "error: incompatible types in comparison expression (different
> address spaces)"
>
> Signed-off-by: Chunyan Zhang <[email protected]>

Hi,

> ---
>
> Changes in v2:
> * Addressed Steven's comments
> - Use rcu_dereference_protected() instead of rcu_dereference_raw_notrace()
> * Rebased on v4.12-rc1
>
> include/linux/ftrace.h | 6 +++---
> include/linux/trace_events.h | 2 +-
> kernel/trace/ftrace.c | 42 ++++++++++++++++++++++++++++--------------
> kernel/trace/trace.h | 6 +++---
> 4 files changed, 35 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 473f088..056395c7 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -142,8 +142,8 @@ enum {
> #ifdef CONFIG_DYNAMIC_FTRACE
> /* The hash used to know what functions callbacks trace */
> struct ftrace_ops_hash {
> - struct ftrace_hash *notrace_hash;
> - struct ftrace_hash *filter_hash;
> + struct ftrace_hash __rcu *notrace_hash;
> + struct ftrace_hash __rcu *filter_hash;
> struct mutex regex_lock;
> };
>
> @@ -165,7 +165,7 @@ static inline void ftrace_free_init_mem(void) { }
> */
> struct ftrace_ops {
> ftrace_func_t func;
> - struct ftrace_ops *next;
> + struct ftrace_ops __rcu *next;
> unsigned long flags;
> void *private;
> ftrace_func_t saved_func;
> diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
> index a556805..89f7bd5d 100644
> --- a/include/linux/trace_events.h
> +++ b/include/linux/trace_events.h
> @@ -327,7 +327,7 @@ enum {
> struct trace_event_file {
> struct list_head list;
> struct trace_event_call *event_call;
> - struct event_filter *filter;
> + struct event_filter __rcu *filter;
> struct dentry *dir;
> struct trace_array *tr;
> struct trace_subsystem_dir *system;
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index 39dca4e..116742b 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -113,7 +113,8 @@ static int ftrace_disabled __read_mostly;
>
> static DEFINE_MUTEX(ftrace_lock);
>
> -static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
> +static struct ftrace_ops __rcu *
> + ftrace_ops_list __read_mostly = &ftrace_list_end;

I may fix this myself, but newlines like the above are IMHO uglier than
going over 80 characters. I'd much rather keep it on one line.

> ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
> static struct ftrace_ops global_ops;
>
> @@ -169,8 +170,11 @@ int ftrace_nr_registered_ops(void)
>
> mutex_lock(&ftrace_lock);
>
> - for (ops = ftrace_ops_list;
> - ops != &ftrace_list_end; ops = ops->next)
> + for (ops = rcu_dereference_protected(ftrace_ops_list,
> + lockdep_is_held(&ftrace_lock));
> + ops != &ftrace_list_end;
> + ops = rcu_dereference_protected(ops->next,
> + lockdep_is_held(&ftrace_lock)))
> cnt++;
>
> mutex_unlock(&ftrace_lock);
> @@ -275,10 +279,11 @@ static void update_ftrace_function(void)
> * If there's only one ftrace_ops registered, the ftrace_ops_list
> * will point to the ops we want.
> */
> - set_function_trace_op = ftrace_ops_list;
> + set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
> + lockdep_is_held(&ftrace_lock));
>
> /* If there's no ftrace_ops registered, just call the stub function */
> - if (ftrace_ops_list == &ftrace_list_end) {
> + if (set_function_trace_op == &ftrace_list_end) {

Slight change of code, but shouldn't have any effect on the flow.

> func = ftrace_stub;
>
> /*
> @@ -286,7 +291,8 @@ static void update_ftrace_function(void)
> * recursion safe and not dynamic and the arch supports passing ops,
> * then have the mcount trampoline call the function directly.
> */
> - } else if (ftrace_ops_list->next == &ftrace_list_end) {
> + } else if (rcu_dereference_protected(ftrace_ops_list->next,
> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
> func = ftrace_ops_get_list_func(ftrace_ops_list);
>
> } else {
> @@ -348,9 +354,11 @@ int using_ftrace_ops_list_func(void)
> return ftrace_trace_function == ftrace_ops_list_func;
> }
>
> -static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> +static void add_ftrace_ops(struct ftrace_ops __rcu **list,
> + struct ftrace_ops *ops)
> {
> - ops->next = *list;
> + rcu_assign_pointer(ops->next, *list);
> +
> /*
> * We are entering ops into the list but another
> * CPU might be walking that list. We need to make sure
> @@ -360,7 +368,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> rcu_assign_pointer(*list, ops);
> }
>
> -static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> +static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
> + struct ftrace_ops *ops)
> {
> struct ftrace_ops **p;
>
> @@ -368,7 +377,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> * If we are removing the last function, then simply point
> * to the ftrace_stub.
> */
> - if (*list == ops && ops->next == &ftrace_list_end) {
> + if (rcu_dereference_protected(*list,
> + lockdep_is_held(&ftrace_lock)) == ops &&
> + rcu_dereference_protected(ops->next,
> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
> *list = &ftrace_list_end;
> return 0;
> }
> @@ -1513,8 +1525,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
> return 0;
> #endif
>
> - hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
> - hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
> + rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
> + rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
>
> if (hash_contains_ip(ip, &hash))
> ret = 1;
> @@ -2784,7 +2796,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
> * If there's no more ops registered with ftrace, run a
> * sanity check to make sure all rec flags are cleared.
> */
> - if (ftrace_ops_list == &ftrace_list_end) {
> + if (rcu_dereference_protected(ftrace_ops_list,
> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
> struct ftrace_page *pg;
> struct dyn_ftrace *rec;
>
> @@ -6122,7 +6135,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
> if (ftrace_enabled) {
>
> /* we are starting ftrace again */
> - if (ftrace_ops_list != &ftrace_list_end)
> + if (rcu_dereference_protected(ftrace_ops_list,
> + lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
> update_ftrace_function();
>
> ftrace_startup_sysctl();
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index 291a1bc..60b26b4 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -1183,9 +1183,9 @@ struct ftrace_event_field {
> struct event_filter {
> int n_preds; /* Number assigned */
> int a_preds; /* allocated */
> - struct filter_pred *preds;
> - struct filter_pred *root;
> - char *filter_string;
> + struct filter_pred __rcu *preds;
> + struct filter_pred __rcu *root;
> + char *filter_string;
> };
>
> struct event_subsystem {

So far looks fine. I'll run it through my tests and see if it triggers
any lockdep issues.

-- Steve

2017-06-08 02:16:47

by Chunyan Zhang

[permalink] [raw]
Subject: Re: [RESEND PATCH v2] trace: fix the errors caused by incompatible type of RCU variables

On 8 June 2017 at 02:36, Steven Rostedt <[email protected]> wrote:
> On Wed, 7 Jun 2017 16:12:51 +0800
> Chunyan Zhang <[email protected]> wrote:
>
>> The variables which are processed by RCU functions should be annotated
>> as RCU, otherwise sparse will report errors like the one below:
>>
>> "error: incompatible types in comparison expression (different
>> address spaces)"
>>
>> Signed-off-by: Chunyan Zhang <[email protected]>
>
> Hi,

Hi Steve,

>
>> ---
>>
>> Changes in v2:
>> * Addressed Steven's comments
>> - Use rcu_dereference_protected() instead of rcu_dereference_raw_notrace()
>> * Rebased on v4.12-rc1
>>
>> include/linux/ftrace.h | 6 +++---
>> include/linux/trace_events.h | 2 +-
>> kernel/trace/ftrace.c | 42 ++++++++++++++++++++++++++++--------------
>> kernel/trace/trace.h | 6 +++---
>> 4 files changed, 35 insertions(+), 21 deletions(-)
>>
>> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
>> index 473f088..056395c7 100644
>> --- a/include/linux/ftrace.h
>> +++ b/include/linux/ftrace.h
>> @@ -142,8 +142,8 @@ enum {
>> #ifdef CONFIG_DYNAMIC_FTRACE
>> /* The hash used to know what functions callbacks trace */
>> struct ftrace_ops_hash {
>> - struct ftrace_hash *notrace_hash;
>> - struct ftrace_hash *filter_hash;
>> + struct ftrace_hash __rcu *notrace_hash;
>> + struct ftrace_hash __rcu *filter_hash;
>> struct mutex regex_lock;
>> };
>>
>> @@ -165,7 +165,7 @@ static inline void ftrace_free_init_mem(void) { }
>> */
>> struct ftrace_ops {
>> ftrace_func_t func;
>> - struct ftrace_ops *next;
>> + struct ftrace_ops __rcu *next;
>> unsigned long flags;
>> void *private;
>> ftrace_func_t saved_func;
>> diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
>> index a556805..89f7bd5d 100644
>> --- a/include/linux/trace_events.h
>> +++ b/include/linux/trace_events.h
>> @@ -327,7 +327,7 @@ enum {
>> struct trace_event_file {
>> struct list_head list;
>> struct trace_event_call *event_call;
>> - struct event_filter *filter;
>> + struct event_filter __rcu *filter;
>> struct dentry *dir;
>> struct trace_array *tr;
>> struct trace_subsystem_dir *system;
>> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
>> index 39dca4e..116742b 100644
>> --- a/kernel/trace/ftrace.c
>> +++ b/kernel/trace/ftrace.c
>> @@ -113,7 +113,8 @@ static int ftrace_disabled __read_mostly;
>>
>> static DEFINE_MUTEX(ftrace_lock);
>>
>> -static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
>> +static struct ftrace_ops __rcu *
>> + ftrace_ops_list __read_mostly = &ftrace_list_end;
>
> I may fix this myself, but newlines like the above are IMHO uglier than

Good, thanks.

> going over 80 characters. I'd much rather keep it on one line.
>
>> ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
>> static struct ftrace_ops global_ops;
>>
>> @@ -169,8 +170,11 @@ int ftrace_nr_registered_ops(void)
>>
>> mutex_lock(&ftrace_lock);
>>
>> - for (ops = ftrace_ops_list;
>> - ops != &ftrace_list_end; ops = ops->next)
>> + for (ops = rcu_dereference_protected(ftrace_ops_list,
>> + lockdep_is_held(&ftrace_lock));
>> + ops != &ftrace_list_end;
>> + ops = rcu_dereference_protected(ops->next,
>> + lockdep_is_held(&ftrace_lock)))
>> cnt++;
>>
>> mutex_unlock(&ftrace_lock);
>> @@ -275,10 +279,11 @@ static void update_ftrace_function(void)
>> * If there's only one ftrace_ops registered, the ftrace_ops_list
>> * will point to the ops we want.
>> */
>> - set_function_trace_op = ftrace_ops_list;
>> + set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
>> + lockdep_is_held(&ftrace_lock));
>>
>> /* If there's no ftrace_ops registered, just call the stub function */
>> - if (ftrace_ops_list == &ftrace_list_end) {
>> + if (set_function_trace_op == &ftrace_list_end) {
>
> Slight change of code, but shouldn't have any effect on the flow.
>
>> func = ftrace_stub;
>>
>> /*
>> @@ -286,7 +291,8 @@ static void update_ftrace_function(void)
>> * recursion safe and not dynamic and the arch supports passing ops,
>> * then have the mcount trampoline call the function directly.
>> */
>> - } else if (ftrace_ops_list->next == &ftrace_list_end) {
>> + } else if (rcu_dereference_protected(ftrace_ops_list->next,
>> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
>> func = ftrace_ops_get_list_func(ftrace_ops_list);
>>
>> } else {
>> @@ -348,9 +354,11 @@ int using_ftrace_ops_list_func(void)
>> return ftrace_trace_function == ftrace_ops_list_func;
>> }
>>
>> -static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>> +static void add_ftrace_ops(struct ftrace_ops __rcu **list,
>> + struct ftrace_ops *ops)
>> {
>> - ops->next = *list;
>> + rcu_assign_pointer(ops->next, *list);
>> +
>> /*
>> * We are entering ops into the list but another
>> * CPU might be walking that list. We need to make sure
>> @@ -360,7 +368,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>> rcu_assign_pointer(*list, ops);
>> }
>>
>> -static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>> +static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
>> + struct ftrace_ops *ops)
>> {
>> struct ftrace_ops **p;
>>
>> @@ -368,7 +377,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>> * If we are removing the last function, then simply point
>> * to the ftrace_stub.
>> */
>> - if (*list == ops && ops->next == &ftrace_list_end) {
>> + if (rcu_dereference_protected(*list,
>> + lockdep_is_held(&ftrace_lock)) == ops &&
>> + rcu_dereference_protected(ops->next,
>> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
>> *list = &ftrace_list_end;
>> return 0;
>> }
>> @@ -1513,8 +1525,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
>> return 0;
>> #endif
>>
>> - hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
>> - hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
>> + rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
>> + rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
>>
>> if (hash_contains_ip(ip, &hash))
>> ret = 1;
>> @@ -2784,7 +2796,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
>> * If there's no more ops registered with ftrace, run a
>> * sanity check to make sure all rec flags are cleared.
>> */
>> - if (ftrace_ops_list == &ftrace_list_end) {
>> + if (rcu_dereference_protected(ftrace_ops_list,
>> + lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
>> struct ftrace_page *pg;
>> struct dyn_ftrace *rec;
>>
>> @@ -6122,7 +6135,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
>> if (ftrace_enabled) {
>>
>> /* we are starting ftrace again */
>> - if (ftrace_ops_list != &ftrace_list_end)
>> + if (rcu_dereference_protected(ftrace_ops_list,
>> + lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
>> update_ftrace_function();
>>
>> ftrace_startup_sysctl();
>> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
>> index 291a1bc..60b26b4 100644
>> --- a/kernel/trace/trace.h
>> +++ b/kernel/trace/trace.h
>> @@ -1183,9 +1183,9 @@ struct ftrace_event_field {
>> struct event_filter {
>> int n_preds; /* Number assigned */
>> int a_preds; /* allocated */
>> - struct filter_pred *preds;
>> - struct filter_pred *root;
>> - char *filter_string;
>> + struct filter_pred __rcu *preds;
>> + struct filter_pred __rcu *root;
>> + char *filter_string;
>> };
>>
>> struct event_subsystem {
>
> So far looks fine. I'll run it through my tests and see if it triggers
> any lockdep issues.

Thank you for the review and test,
Chunyan

>
> -- Steve