The modules list is traversed with list_for_each_entry_rcu() outside of
an RCU read-side critical section, but under the protection of
module_mutex or with preemption disabled.

Hence, add the corresponding lockdep expression to silence
false-positive lockdep warnings and to harden the RCU list traversals.

list_for_each_entry_rcu() does not need an explicit lockdep expression
when the traversal runs with preemption disabled, since that case is
already checked for implicitly.

Add a macro for the corresponding lockdep expression.
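
To illustrate the pattern being applied, here is a minimal,
self-contained sketch (not part of this patch; my_mutex, my_list,
my_entry and the two helpers below are hypothetical names used only
for illustration):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(my_mutex);
static LIST_HEAD(my_list);

struct my_entry {
        struct list_head list;
        int val;
};

/* Traversal done with my_mutex held instead of rcu_read_lock(). */
static int sum_under_mutex(void)
{
        struct my_entry *e;
        int sum = 0;

        lockdep_assert_held(&my_mutex);
        /*
         * The optional fourth argument names the lock that makes this
         * traversal safe, so CONFIG_PROVE_RCU_LIST does not flag the
         * missing RCU read-side critical section.
         */
        list_for_each_entry_rcu(e, &my_list, list,
                                lockdep_is_held(&my_mutex))
                sum += e->val;

        return sum;
}

/*
 * Traversal done with preemption disabled: this counts as an RCU-sched
 * read-side critical section, which the RCU list debugging code already
 * checks for implicitly, so no explicit lockdep expression is needed.
 */
static int sum_preempt_disabled(void)
{
        struct my_entry *e;
        int sum = 0;

        preempt_disable();
        list_for_each_entry_rcu(e, &my_list, list)
                sum += e->val;
        preempt_enable();

        return sum;
}

In module.c the equivalent of lockdep_is_held(&my_mutex) is the
module_mutex_held() macro added below.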
Signed-off-by: Amol Grover <[email protected]>
---
kernel/module.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index b56f3224b161..2425f58159dd 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -84,6 +84,8 @@
* 3) module_addr_min/module_addr_max.
* (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
+#define module_mutex_held() \
+ lockdep_is_held(&module_mutex)
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
@@ -214,7 +216,7 @@ static struct module *mod_find(unsigned long addr)
{
struct module *mod;
- list_for_each_entry_rcu(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list, module_mutex_held()) {
if (within_module(addr, mod))
return mod;
}
@@ -448,7 +450,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true;
- list_for_each_entry_rcu(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list, module_mutex_held()) {
struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
NOT_GPL_ONLY, false },
@@ -616,7 +618,7 @@ static struct module *find_module_all(const char *name, size_t len,
module_assert_mutex_or_preempt();
- list_for_each_entry_rcu(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list, module_mutex_held()) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
continue;
if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
@@ -2040,7 +2042,7 @@ void set_all_modules_text_rw(void)
return;
mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list, module_mutex_held()) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -2059,7 +2061,7 @@ void set_all_modules_text_ro(void)
return;
mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
+ list_for_each_entry_rcu(mod, &modules, list, module_mutex_held()) {
/*
* Ignore going modules since it's possible that ro
* protection has already been disabled, otherwise we'll
--
2.24.1
+++ Amol Grover [21/01/20 18:17 +0530]:
Hi Amol!

Masami already submitted a patch for this, it's been in linux-next for
a while. See commit bf08949cc8b9 ("modules: lockdep: Suppress
suspicious RCU usage warning").

Thanks!

Jessica
On Thu, Jan 23, 2020 at 01:10:10PM +0100, Jessica Yu wrote:
> +++ Amol Grover [21/01/20 18:17 +0530]:
>
> Hi Amol!
>
> Masami already submitted a patch for this, it's been in linux-next for
> a while. See commit bf08949cc8b9 ("modules: lockdep: Suppress
> suspicious RCU usage warning").
>
Hey Jessica,

Thank you for reviewing the patch!

Thanks
Amol