2021-10-11 16:23:03

by Sean Nyekjaer

Subject: [PATCH 1/3] mtd: core: protect access to MTD devices while in suspend

Prevent accessing the devices while in a suspended state. Also
prevent suspending a device which is still currently in use.

Commit 013e6292aaf5 ("mtd: rawnand: Simplify the locking") allows the
rawnand layer to return errors rather than waiting in a blocking wait.
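
Every accessor now brackets the call into the master with
mtd_start_access()/mtd_end_access(), which take the new suspend_lock
for reading and wait on resume_wq while the device is suspended;
mtd_suspend()/mtd_resume() take the same rwsem for writing. The
pattern, roughly (mtd_unpoint() shown as an example, see the diff for
the full list):

int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	mtd_start_access(mtd);	/* blocks while the device is suspended */
	ret = master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
	mtd_end_access(mtd);	/* lets a pending mtd_suspend() proceed */

	return ret;
}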

Tested on an iMX6ULL.

Fixes: 013e6292aaf5 ("mtd: rawnand: Simplify the locking")
Suggested-by: Boris Brezillon <[email protected]>
Signed-off-by: Sean Nyekjaer <[email protected]>
---
drivers/mtd/mtdcore.c | 133 +++++++++++++++++++++++++++++++++++-----
include/linux/mtd/mtd.h | 109 +++++++++++++++++++++++++++-----
2 files changed, 211 insertions(+), 31 deletions(-)

diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c8fd7f758938..51be9b46ef54 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -777,6 +777,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
INIT_LIST_HEAD(&mtd->partitions);
mutex_init(&mtd->master.partitions_lock);
mutex_init(&mtd->master.chrdev_lock);
+ init_waitqueue_head(&mtd->master.resume_wq);
+ init_rwsem(&mtd->master.suspend_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
@@ -1257,6 +1259,8 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)

ledtrig_mtd_activity();

+ mtd_start_access(mtd);
+
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
master->erasesize;
@@ -1278,6 +1282,8 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
}
}

+ mtd_end_access(mtd);
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -1289,6 +1295,7 @@ int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

*retlen = 0;
*virt = NULL;
@@ -1301,8 +1308,12 @@ int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
if (!len)
return 0;

+ mtd_start_access(mtd);
from = mtd_get_master_ofs(mtd, from);
- return master->_point(master, from, len, retlen, virt, phys);
+ ret = master->_point(master, from, len, retlen, virt, phys);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_point);

@@ -1310,6 +1321,7 @@ EXPORT_SYMBOL_GPL(mtd_point);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_unpoint)
return -EOPNOTSUPP;
@@ -1317,7 +1329,12 @@ int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return -EINVAL;
if (!len)
return 0;
- return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
+
+ mtd_start_access(mtd);
+ ret = master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

@@ -1372,6 +1389,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
};
int ret;

+ /* mtd_read_oob handles mtd access protection */
ret = mtd_read_oob(mtd, from, &ops);
*retlen = ops.retlen;

@@ -1388,6 +1406,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
};
int ret;

+ /* mtd_write_oob handles mtd access protection */
ret = mtd_write_oob(mtd, to, &ops);
*retlen = ops.retlen;

@@ -1406,6 +1425,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

*retlen = 0;
if (!master->_panic_write)
@@ -1419,8 +1439,12 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
if (!master->oops_panic_write)
master->oops_panic_write = true;

- return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
- retlen, buf);
+ mtd_start_access(mtd);
+ ret = master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+ retlen, buf);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

@@ -1566,6 +1590,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)

ledtrig_mtd_activity();

+ mtd_start_access(mtd);
+
/* Check the validity of a potential fallback on mtd->_read */
if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
@@ -1576,7 +1602,7 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ret_code = mtd_read_oob_std(mtd, from, ops);

mtd_update_ecc_stats(mtd, master, &old_stats);
-
+ mtd_end_access(mtd);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
@@ -1597,6 +1623,8 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_info *master = mtd_get_master(mtd);
int ret;

+ mtd_start_access(mtd);
+
ops->retlen = ops->oobretlen = 0;

if (!(mtd->flags & MTD_WRITEABLE))
@@ -1615,7 +1643,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
return mtd_io_emulated_slc(mtd, to, false, ops);

- return mtd_write_oob_std(mtd, to, ops);
+ ret = mtd_write_oob_std(mtd, to, ops);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

@@ -1992,12 +2023,18 @@ int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_get_fact_prot_info(master, len, retlen, buf);
+
+ mtd_start_access(mtd);
+ ret = master->_get_fact_prot_info(master, len, retlen, buf);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

@@ -2005,13 +2042,19 @@ int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

*retlen = 0;
if (!master->_read_fact_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_read_fact_prot_reg(master, from, len, retlen, buf);
+
+ mtd_start_access(mtd);
+ ret = master->_read_fact_prot_reg(master, from, len, retlen, buf);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

@@ -2019,12 +2062,18 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_get_user_prot_info(master, len, retlen, buf);
+
+ mtd_start_access(mtd);
+ ret = master->_get_user_prot_info(master, len, retlen, buf);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

@@ -2032,13 +2081,19 @@ int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

*retlen = 0;
if (!master->_read_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_read_user_prot_reg(master, from, len, retlen, buf);
+
+ mtd_start_access(mtd);
+ ret = master->_read_user_prot_reg(master, from, len, retlen, buf);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

@@ -2053,7 +2108,11 @@ int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
return -EOPNOTSUPP;
if (!len)
return 0;
+
+ mtd_start_access(mtd);
ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
+ mtd_end_access(mtd);
+
if (ret)
return ret;

@@ -2068,24 +2127,36 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_lock_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_lock_user_prot_reg(master, from, len);
+
+ mtd_start_access(mtd);
+ ret = master->_lock_user_prot_reg(master, from, len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_erase_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return master->_erase_user_prot_reg(master, from, len);
+
+ mtd_start_access(mtd);
+ ret = master->_erase_user_prot_reg(master, from, len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

@@ -2093,6 +2164,7 @@ EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_lock)
return -EOPNOTSUPP;
@@ -2106,13 +2178,18 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}

- return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_start_access(mtd);
+ ret = master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_unlock)
return -EOPNOTSUPP;
@@ -2126,13 +2203,18 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}

- return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_start_access(mtd);
+ ret = master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_is_locked)
return -EOPNOTSUPP;
@@ -2146,13 +2228,18 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}

- return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_start_access(mtd);
+ ret = master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
@@ -2162,13 +2249,18 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

- return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
+ mtd_start_access(mtd);
+ ret = master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
@@ -2178,7 +2270,11 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

- return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
+ mtd_start_access(mtd);
+ ret = master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
+ mtd_end_access(mtd);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

@@ -2197,7 +2293,10 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

+ mtd_start_access(mtd);
ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ mtd_end_access(mtd);
+
if (ret)
return ret;

diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 88227044fc86..7765d1fcfc8b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -231,6 +231,8 @@ struct mtd_master {
struct mutex partitions_lock;
struct mutex chrdev_lock;
unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
+ struct rw_semaphore suspend_lock;
};

struct mtd_info {
@@ -476,10 +478,49 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

+static inline void mtd_start_access(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ /*
+ * Don't take the suspend_lock on devices that don't
+ * implement the suspend hook. Otherwise, lockdep will
+ * complain about nested locks when trying to suspend MTD
+ * partitions or MTD devices created by gluebi which are
+ * backed by real devices.
+ */
+ if (!master->_suspend)
+ return;
+
+ /*
+ * Wait until the device is resumed. Should we have a
+ * non-blocking mode here?
+ */
+ while (1) {
+ down_read(&master->master.suspend_lock);
+ if (!master->master.suspended)
+ return;
+
+ up_read(&master->master.suspend_lock);
+ wait_event(master->master.resume_wq, master->master.suspended == 0);
+ }
+}
+
+static inline void mtd_end_access(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_suspend)
+ return;
+
+ up_read(&master->master.suspend_lock);
+}
+
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
loff_t ofs, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
+ int ret;

if (!master->_max_bad_blocks)
return -ENOTSUPP;
@@ -487,8 +528,12 @@ static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
if (mtd->size < (len + ofs) || ofs < 0)
return -EINVAL;

- return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
- len);
+ mtd_start_access(mtd);
+ ret = master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
+ mtd_end_access(mtd);
+
+ return ret;
}

int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
@@ -543,33 +588,69 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);

+static inline int __mtd_suspend(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret = 0;
+
+ if (!master->_suspend)
+ return ret;
+
+ if (!master->master.suspended) {
+ ret = master->_suspend(master);
+ if (!ret)
+ master->master.suspended = 1;
+ }
+
+ return ret;
+}
+
static inline int mtd_suspend(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
- int ret;
+ int ret = 0;

- if (master->master.suspended)
- return 0;
+ if (!master->_suspend)
+ return ret;

- ret = master->_suspend ? master->_suspend(master) : 0;
- if (ret)
+ down_write(&master->master.suspend_lock);
+ ret = __mtd_suspend(mtd);
+ up_write(&master->master.suspend_lock);
+
+ return ret;
+}
+
+static inline int __mtd_resume(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret = 0;
+
+ if (!master->_suspend)
return ret;

- master->master.suspended = 1;
- return 0;
+ if (master->master.suspended) {
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
+ ret = 1;
+ }
+
+ return ret;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);

- if (!master->master.suspended)
+ if (!master->_suspend)
return;

- if (master->_resume)
- master->_resume(master);
-
- master->master.suspended = 0;
+ down_write(&master->master.suspend_lock);
+ /* If MTD dev has been resumed, wake up all waiters. */
+ if (__mtd_resume(mtd))
+ wake_up_all(&master->master.resume_wq);
+ up_write(&master->master.suspend_lock);
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
--
2.33.0


2021-10-11 16:27:28

by Boris Brezillon

Subject: Re: [PATCH 1/3] mtd: core: protect access to MTD devices while in suspend

On Mon, 11 Oct 2021 13:52:51 +0200
Sean Nyekjaer <[email protected]> wrote:

> Prevent accessing the devices while in a suspended state. Also
> prevent suspending a device which is still currently in use.
>
> Commit 013e6292aaf5 ("mtd: rawnand: Simplify the locking") allows the
> rawnand layer to return errors rather than waiting in a blocking wait.
>
> Tested on an iMX6ULL.
>
> Fixes: 013e6292aaf5 ("mtd: rawnand: Simplify the locking")
> Suggested-by: Boris Brezillon <[email protected]>
> Signed-off-by: Sean Nyekjaer <[email protected]>
> ---
> drivers/mtd/mtdcore.c | 133 +++++++++++++++++++++++++++++++++++-----
> include/linux/mtd/mtd.h | 109 +++++++++++++++++++++++++++-----
> 2 files changed, 211 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
> index c8fd7f758938..51be9b46ef54 100644
> --- a/drivers/mtd/mtdcore.c
> +++ b/drivers/mtd/mtdcore.c
> @@ -777,6 +777,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
> INIT_LIST_HEAD(&mtd->partitions);
> mutex_init(&mtd->master.partitions_lock);
> mutex_init(&mtd->master.chrdev_lock);
> + init_waitqueue_head(&mtd->master.resume_wq);
> + init_rwsem(&mtd->master.suspend_lock);
> }
>
> static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
> @@ -1257,6 +1259,8 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
>
> ledtrig_mtd_activity();
>
> + mtd_start_access(mtd);
> +
> if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
> adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
> master->erasesize;
> @@ -1278,6 +1282,8 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
> }
> }
>
> + mtd_end_access(mtd);
> +
> return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_erase);
> @@ -1289,6 +1295,7 @@ int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
> void **virt, resource_size_t *phys)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> *retlen = 0;
> *virt = NULL;
> @@ -1301,8 +1308,12 @@ int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
> if (!len)
> return 0;
>
> + mtd_start_access(mtd);
> from = mtd_get_master_ofs(mtd, from);
> - return master->_point(master, from, len, retlen, virt, phys);
> + ret = master->_point(master, from, len, retlen, virt, phys);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_point);
>
> @@ -1310,6 +1321,7 @@ EXPORT_SYMBOL_GPL(mtd_point);
> int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_unpoint)
> return -EOPNOTSUPP;
> @@ -1317,7 +1329,12 @@ int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
> return -EINVAL;
> if (!len)
> return 0;
> - return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
> +
> + mtd_start_access(mtd);
> + ret = master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_unpoint);
>
> @@ -1372,6 +1389,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
> };
> int ret;
>
> + /* mtd_read_oob handles mtd access protection */
> ret = mtd_read_oob(mtd, from, &ops);
> *retlen = ops.retlen;
>
> @@ -1388,6 +1406,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
> };
> int ret;
>
> + /* mtd_write_oob handles mtd access protection */
> ret = mtd_write_oob(mtd, to, &ops);
> *retlen = ops.retlen;
>
> @@ -1406,6 +1425,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
> const u_char *buf)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> *retlen = 0;
> if (!master->_panic_write)
> @@ -1419,8 +1439,12 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
> if (!master->oops_panic_write)
> master->oops_panic_write = true;
>
> - return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
> - retlen, buf);
> + mtd_start_access(mtd);
> + ret = master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
> + retlen, buf);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_panic_write);
>
> @@ -1566,6 +1590,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
>
> ledtrig_mtd_activity();
>
> + mtd_start_access(mtd);
> +
> /* Check the validity of a potential fallback on mtd->_read */
> if (!master->_read_oob && (!master->_read || ops->oobbuf))
> return -EOPNOTSUPP;
> @@ -1576,7 +1602,7 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
> ret_code = mtd_read_oob_std(mtd, from, ops);
>
> mtd_update_ecc_stats(mtd, master, &old_stats);
> -
> + mtd_end_access(mtd);
> /*
> * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
> * similar to mtd->_read(), returning a non-negative integer
> @@ -1597,6 +1623,8 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
> struct mtd_info *master = mtd_get_master(mtd);
> int ret;
>
> + mtd_start_access(mtd);
> +
> ops->retlen = ops->oobretlen = 0;
>
> if (!(mtd->flags & MTD_WRITEABLE))
> @@ -1615,7 +1643,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
> if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
> return mtd_io_emulated_slc(mtd, to, false, ops);
>
> - return mtd_write_oob_std(mtd, to, ops);
> + ret = mtd_write_oob_std(mtd, to, ops);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_write_oob);
>
> @@ -1992,12 +2023,18 @@ int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
> struct otp_info *buf)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_get_fact_prot_info)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_get_fact_prot_info(master, len, retlen, buf);
> +
> + mtd_start_access(mtd);
> + ret = master->_get_fact_prot_info(master, len, retlen, buf);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
>
> @@ -2005,13 +2042,19 @@ int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
> size_t *retlen, u_char *buf)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> *retlen = 0;
> if (!master->_read_fact_prot_reg)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_read_fact_prot_reg(master, from, len, retlen, buf);
> +
> + mtd_start_access(mtd);
> + ret = master->_read_fact_prot_reg(master, from, len, retlen, buf);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
>
> @@ -2019,12 +2062,18 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
> struct otp_info *buf)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_get_user_prot_info)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_get_user_prot_info(master, len, retlen, buf);
> +
> + mtd_start_access(mtd);
> + ret = master->_get_user_prot_info(master, len, retlen, buf);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
>
> @@ -2032,13 +2081,19 @@ int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
> size_t *retlen, u_char *buf)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> *retlen = 0;
> if (!master->_read_user_prot_reg)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_read_user_prot_reg(master, from, len, retlen, buf);
> +
> + mtd_start_access(mtd);
> + ret = master->_read_user_prot_reg(master, from, len, retlen, buf);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
>
> @@ -2053,7 +2108,11 @@ int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> +
> + mtd_start_access(mtd);
> ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
> + mtd_end_access(mtd);
> +
> if (ret)
> return ret;
>
> @@ -2068,24 +2127,36 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
> int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_lock_user_prot_reg)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_lock_user_prot_reg(master, from, len);
> +
> + mtd_start_access(mtd);
> + ret = master->_lock_user_prot_reg(master, from, len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
>
> int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_erase_user_prot_reg)
> return -EOPNOTSUPP;
> if (!len)
> return 0;
> - return master->_erase_user_prot_reg(master, from, len);
> +
> + mtd_start_access(mtd);
> + ret = master->_erase_user_prot_reg(master, from, len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
>
> @@ -2093,6 +2164,7 @@ EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
> int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_lock)
> return -EOPNOTSUPP;
> @@ -2106,13 +2178,18 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
> }
>
> - return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_start_access(mtd);
> + ret = master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_lock);
>
> int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_unlock)
> return -EOPNOTSUPP;
> @@ -2126,13 +2203,18 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
> }
>
> - return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_start_access(mtd);
> + ret = master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_unlock);
>
> int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_is_locked)
> return -EOPNOTSUPP;
> @@ -2146,13 +2228,18 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
> len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
> }
>
> - return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_start_access(mtd);
> + ret = master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_is_locked);
>
> int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (ofs < 0 || ofs >= mtd->size)
> return -EINVAL;
> @@ -2162,13 +2249,18 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
> if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
> ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
>
> - return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
> + mtd_start_access(mtd);
> + ret = master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_block_isreserved);
>
> int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (ofs < 0 || ofs >= mtd->size)
> return -EINVAL;
> @@ -2178,7 +2270,11 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
> if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
> ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
>
> - return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
> + mtd_start_access(mtd);
> + ret = master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
> + mtd_end_access(mtd);
> +
> + return ret;
> }
> EXPORT_SYMBOL_GPL(mtd_block_isbad);
>
> @@ -2197,7 +2293,10 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
> if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
> ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
>
> + mtd_start_access(mtd);
> ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
> + mtd_end_access(mtd);
> +
> if (ret)
> return ret;
>
> diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
> index 88227044fc86..7765d1fcfc8b 100644
> --- a/include/linux/mtd/mtd.h
> +++ b/include/linux/mtd/mtd.h
> @@ -231,6 +231,8 @@ struct mtd_master {
> struct mutex partitions_lock;
> struct mutex chrdev_lock;
> unsigned int suspended : 1;
> + wait_queue_head_t resume_wq;
> + struct rw_semaphore suspend_lock;
> };
>
> struct mtd_info {
> @@ -476,10 +478,49 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
> return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
> }
>
> +static inline void mtd_start_access(struct mtd_info *mtd)
> +{
> + struct mtd_info *master = mtd_get_master(mtd);
> +
> + /*
> + * Don't take the suspend_lock on devices that don't
> + * implement the suspend hook. Otherwise, lockdep will
> + * complain about nested locks when trying to suspend MTD
> + * partitions or MTD devices created by gluebi which are
> + * backed by real devices.
> + */
> + if (!master->_suspend)
> + return;
> +
> + /*
> + * Wait until the device is resumed. Should we have a
> + * non-blocking mode here?
> + */
> + while (1) {
> + down_read(&master->master.suspend_lock);
> + if (!master->master.suspended)
> + return;
> +
> + up_read(&master->master.suspend_lock);
> + wait_event(master->master.resume_wq, master->master.suspended == 0);
> + }
> +}
> +
> +static inline void mtd_end_access(struct mtd_info *mtd)
> +{
> + struct mtd_info *master = mtd_get_master(mtd);
> +
> + if (!master->_suspend)
> + return;
> +
> + up_read(&master->master.suspend_lock);
> +}
> +
> static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
> loff_t ofs, size_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_max_bad_blocks)
> return -ENOTSUPP;
> @@ -487,8 +528,12 @@ static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
> if (mtd->size < (len + ofs) || ofs < 0)
> return -EINVAL;
>
> - return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
> - len);
> + mtd_start_access(mtd);
> + ret = master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
> + len);
> + mtd_end_access(mtd);
> +
> + return ret;
> }
>
> int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
> @@ -543,33 +588,69 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
> int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
> int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
>
> +static inline int __mtd_suspend(struct mtd_info *mtd)

Please give it a proper name, like mtd_suspend_locked().

I'm really not fond of this solution where we let mtdconcat grab the
lock and call the mtd_{suspend,resume}_locked() variant on its
subdevices, but fixing it in a sane way would require registering the
subdevices, which is likely to mess with the MTD dev numbering and
cause even more pain. But please add a comment explaining why you need
to expose those _locked() variants, with a big 'DON'T USE UNLESS YOU'RE
MTDCONCAT' disclaimer. Or maybe even better, open-code the _locked()
variants in mtdconcat so others don't get tempted to use them.
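
For example, the renamed helper could carry a disclaimer like this
(rough sketch, body unchanged from the quoted __mtd_suspend()):

/*
 * DON'T USE UNLESS YOU'RE MTDCONCAT: the caller is expected to
 * serialize against suspend/resume itself (as mtdconcat does);
 * everyone else must go through mtd_suspend()/mtd_resume(), which
 * take master->master.suspend_lock.
 */
static inline int mtd_suspend_locked(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret = 0;

	if (!master->_suspend)
		return 0;

	if (!master->master.suspended) {
		ret = master->_suspend(master);
		if (!ret)
			master->master.suspended = 1;
	}

	return ret;
}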

> +{
> + struct mtd_info *master = mtd_get_master(mtd);
> + int ret = 0;
> +
> + if (!master->_suspend)
> + return ret;
> +
> + if (!master->master.suspended) {
> + ret = master->_suspend(master);
> + if (!ret)
> + master->master.suspended = 1;
> + }
> +
> + return ret;
> +}
> +
> static inline int mtd_suspend(struct mtd_info *mtd)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> - int ret;
> + int ret = 0;
>
> - if (master->master.suspended)
> - return 0;
> + if (!master->_suspend)
> + return ret;
>
> - ret = master->_suspend ? master->_suspend(master) : 0;
> - if (ret)
> + down_write(&master->master.suspend_lock);
> + ret = __mtd_suspend(mtd);
> + up_write(&master->master.suspend_lock);
> +
> + return ret;
> +}
> +
> +static inline int __mtd_resume(struct mtd_info *mtd)
> +{
> + struct mtd_info *master = mtd_get_master(mtd);
> + int ret = 0;
> +
> + if (!master->_suspend)
> return ret;
>
> - master->master.suspended = 1;
> - return 0;
> + if (master->master.suspended) {
> + if (master->_resume)
> + master->_resume(master);
> +
> + master->master.suspended = 0;
> + ret = 1;
> + }
> +
> + return ret;
> }
>
> static inline void mtd_resume(struct mtd_info *mtd)
> {
> struct mtd_info *master = mtd_get_master(mtd);
>
> - if (!master->master.suspended)
> + if (!master->_suspend)
> return;
>
> - if (master->_resume)
> - master->_resume(master);
> -
> - master->master.suspended = 0;
> + down_write(&master->master.suspend_lock);
> + /* If MTD dev has been resumed, wake up all waiters. */
> + if (__mtd_resume(mtd))
> + wake_up_all(&master->master.resume_wq);
> + up_write(&master->master.suspend_lock);
> }
>
> static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)

2021-10-14 06:31:56

by kernel test robot

Subject: [mtd] d3ff51cfa9: WARNING:at_kernel/locking/rwsem.c:#down_read



Greetings,

FYI, we noticed the following commit (built with clang-14):

commit: d3ff51cfa9a64cdc18abe24c9821891b5122e617 ("[PATCH 1/3] mtd: core: protect access to MTD devices while in suspend")
url: https://github.com/0day-ci/linux/commits/Sean-Nyekjaer/mtd-core-protect-access-to-mtd-devices-while-in-suspend/20211011-195606
base: https://git.kernel.org/cgit/linux/kernel/git/mtd/linux.git mtd/next

in testcase: boot

on test machine: qemu-system-i386 -enable-kvm -cpu SandyBridge -smp 2 -m 4G

caused the changes below (please refer to the attached dmesg/kmsg for the entire log/backtrace):


+----------------------------------------------+------------+------------+
| | b72841e4dc | d3ff51cfa9 |
+----------------------------------------------+------------+------------+
| boot_successes | 20 | 0 |
| boot_failures | 0 | 21 |
| INFO:trying_to_register_non-static_key | 0 | 21 |
| WARNING:at_kernel/locking/rwsem.c:#down_read | 0 | 21 |
| EIP:down_read | 0 | 21 |
| WARNING:at_kernel/locking/rwsem.c:#__up_read | 0 | 21 |
| EIP:__up_read | 0 | 21 |
+----------------------------------------------+------------+------------+


If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>


[ 4.304729][ T1] ------------[ cut here ]------------
[ 4.305194][ T1] DEBUG_RWSEMS_WARN_ON(sem->magic != sem): count = 0x0, magic = 0x0, owner = 0x0, curr 0xb3690000, list not empty
[ 4.306218][ T1] WARNING: CPU: 1 PID: 1 at kernel/locking/rwsem.c:1240 down_read+0xff/0x110
[ 4.306989][ T1] Modules linked in:
[ 4.307318][ T1] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.15.0-rc1-00002-gd3ff51cfa9a6 #1
[ 4.308071][ T1] EIP: down_read+0xff/0x110
[ 4.308445][ T1] Code: 9f 03 b2 ba 0f 44 d1 52 0f 44 d1 52 ff 75 f0 ff ff 75 f0 ff 00 b2 68 79 00 b2 68 79 c7 d5 5a ff c7 d5 5a ff c4 1c 0f 0b c4 1c <0f> 0b 58 ff ff ff 58 ff ff ff ff 90 90 3e ff 90 90 3e 55 89 e5 53
[ 4.310153][ T1] EAX: 00000000 EBX: b15a2702 ECX: 80000001 EDX: 00000001
[ 4.310861][ T1] ESI: f5dc0404 EDI: 00000000 EBP: b3693c98 ESP: b3693c84
[ 4.311546][ T1] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 EFLAGS: 00010296
[ 4.312277][ T1] CR0: 80050033 CR2: 00000000 CR3: 027d9000 CR4: 000406d0
[ 4.312919][ T1] DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
[ 4.313518][ T1] DR6: fffe0ff0 DR7: 00000400
[ 4.313926][ T1] Call Trace:
[ 4.314237][ T1] mtd_start_access+0x32/0xc0
[ 4.314707][ T1] ? ledtrig_mtd_activity+0x37/0x40
[ 4.315194][ T1] mtd_read_oob+0x75/0x1e0
[ 4.315572][ T1] scan_block_fast+0x6a/0xc0
[ 4.316003][ T1] create_bbt+0x130/0x2a0
[ 4.316399][ T1] ? __kmalloc+0x146/0x1a0
[ 4.316811][ T1] ? trace_kmalloc_node+0x39/0x100
[ 4.317267][ T1] nand_create_bbt+0x1fe/0x9e0
[ 4.317685][ T1] ? ns_alloc_device+0xa4/0x18b
[ 4.318107][ T1] ? ns_init+0x3b6/0x688
[ 4.318459][ T1] ? __kmalloc+0x146/0x1a0
[ 4.318858][ T1] ns_init_module+0x5f9/0x809
[ 4.319254][ T1] ? __this_cpu_preempt_check+0xf/0x11
[ 4.319719][ T1] ? debug_smp_processor_id+0x12/0x20
[ 4.320174][ T1] ? generic_onenand_driver_init+0x16/0x16
[ 4.320662][ T1] ? rcu_read_lock_sched_held+0x36/0x70
[ 4.321122][ T1] ? generic_onenand_driver_init+0x16/0x16
[ 4.321602][ T1] do_one_initcall+0x93/0x160
[ 4.321997][ T1] ? irqentry_exit+0x56/0x80
[ 4.322382][ T1] ? __this_cpu_preempt_check+0xf/0x11
[ 4.322854][ T1] ? lockdep_hardirqs_on+0x82/0x110
[ 4.323273][ T1] ? irqentry_exit+0x56/0x80
[ 4.323642][ T1] ? sysvec_call_function_single+0x30/0x30
[ 4.324124][ T1] ? trace_hardirqs_on+0x45/0x50
[ 4.324523][ T1] ? irqentry_exit+0x56/0x80
[ 4.324893][ T1] ? sysvec_apic_timer_interrupt+0x29/0x30
[ 4.325367][ T1] ? handle_exception+0x101/0x101
[ 4.325776][ T1] ? p9_client_readlink+0x9b/0xa0
[ 4.326186][ T1] ? strlen+0xd/0x20
[ 4.326508][ T1] ? next_arg+0xfb/0x110
[ 4.326877][ T1] ? parse_args+0x15c/0x330
[ 4.327244][ T1] ? __this_cpu_preempt_check+0xf/0x11
[ 4.327688][ T1] ? debug_smp_processor_id+0x12/0x20
[ 4.328155][ T1] ? generic_onenand_driver_init+0x16/0x16
[ 4.328664][ T1] do_initcall_level+0x80/0x92
[ 4.329088][ T1] ? rest_init+0x1c0/0x1c0
[ 4.329475][ T1] do_initcalls+0x41/0x62
[ 4.329838][ T1] do_basic_setup+0x17/0x19
[ 4.330214][ T1] kernel_init_freeable+0x8c/0xd1
[ 4.330657][ T1] kernel_init+0x17/0x170
[ 4.331087][ T1] ret_from_fork+0x1c/0x28
[ 4.331503][ T1] irq event stamp: 319917
[ 4.331919][ T1] hardirqs last enabled at (319917): [<b10e59cc>] __up_console_sem+0x5c/0x90
[ 4.332719][ T1] hardirqs last disabled at (319916): [<b10e59b3>] __up_console_sem+0x43/0x90
[ 4.333495][ T1] softirqs last enabled at (318956): [<b121d2f1>] bdi_register_va+0x1d1/0x1f0
[ 4.334243][ T1] softirqs last disabled at (318954): [<b121d202>] bdi_register_va+0xe2/0x1f0
[ 4.334994][ T1] _warn_unseeded_randomness: 5 callbacks suppressed
[ 4.334997][ T1] random: get_random_bytes called from __warn+0xc3/0x140 with crng_init=0
[ 4.335006][ T1] ---[ end trace 8e9a8c821c89b3d1 ]---




To reproduce:

# build kernel
cd linux
cp config-5.15.0-rc1-00002-gd3ff51cfa9a6 .config
make HOSTCC=clang-14 CC=clang-14 ARCH=i386 olddefconfig prepare modules_prepare bzImage

git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp qemu -k <bzImage> job-script # job-script is attached in this email

# if you come across any failure that blocks the test,
# please remove ~/.lkp and /lkp dir to run from a clean state.



---
0DAY/LKP+ Test Infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/[email protected] Intel Corporation

Thanks,
Oliver Sang


Attachments:
config-5.15.0-rc1-00002-gd3ff51cfa9a6 (127.61 kB)
job-script (4.91 kB)
dmesg.xz (18.88 kB)

2021-10-15 13:06:06

by Boris Brezillon

Subject: Re: [PATCH 1/3] mtd: core: protect access to MTD devices while in suspend

On Mon, 11 Oct 2021 13:52:51 +0200
Sean Nyekjaer <[email protected]> wrote:

> struct mtd_info {
> @@ -476,10 +478,49 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
> return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
> }
>
> +static inline void mtd_start_access(struct mtd_info *mtd)
> +{
> + struct mtd_info *master = mtd_get_master(mtd);

mtd_{start,end}_access() should only be called on master devices, so I
guess you can drop the mtd_get_master() call and use mtd directly.
Maybe add a WARN_ON_ONCE(mtd != mtd_get_master(mtd)) so we can
easily catch silly mistakes.
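
Something along these lines (sketch):

static inline void mtd_start_access(struct mtd_info *master)
{
	/* Catch callers that pass a partition instead of the master. */
	WARN_ON_ONCE(master != mtd_get_master(master));

	if (!master->_suspend)
		return;

	while (1) {
		down_read(&master->master.suspend_lock);
		if (!master->master.suspended)
			return;

		up_read(&master->master.suspend_lock);
		wait_event(master->master.resume_wq,
			   master->master.suspended == 0);
	}
}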

> +
> + /*
> + * Don't take the suspend_lock on devices that don't
> + * implement the suspend hook. Otherwise, lockdep will
> + * complain about nested locks when trying to suspend MTD
> + * partitions or MTD devices created by gluebi which are
> + * backed by real devices.
> + */
> + if (!master->_suspend)
> + return;
> +
> + /*
> + * Wait until the device is resumed. Should we have a
> + * non-blocking mode here?
> + */
> + while (1) {
> + down_read(&master->master.suspend_lock);
> + if (!master->master.suspended)
> + return;
> +
> + up_read(&master->master.suspend_lock);
> + wait_event(master->master.resume_wq, master->master.suspended == 0);
> + }
> +}
> +
> +static inline void mtd_end_access(struct mtd_info *mtd)
> +{
> + struct mtd_info *master = mtd_get_master(mtd);
> +
> + if (!master->_suspend)
> + return;
> +
> + up_read(&master->master.suspend_lock);
> +}
> +
> static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
> loff_t ofs, size_t len)
> {
> struct mtd_info *master = mtd_get_master(mtd);
> + int ret;
>
> if (!master->_max_bad_blocks)
> return -ENOTSUPP;
> @@ -487,8 +528,12 @@ static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
> if (mtd->size < (len + ofs) || ofs < 0)
> return -EINVAL;
>
> - return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
> - len);
> + mtd_start_access(mtd);
> + ret = master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
> + len);
> + mtd_end_access(mtd);

Please pass the master to those functions, there's no point walking the
parent chain again in the start/end_access() functions if you already
have the master retrieved in the caller. Oh, and there seems to be a
common pattern here, so maybe it's worth adding those macros:

#define mtd_no_suspend_void_call(master, method, ...) \
	mtd_start_access(master); \
	master->method(master, __VA_ARGS__); \
	mtd_end_access(master);

#define mtd_no_suspend_ret_call(ret, master, method, ...) \
	mtd_start_access(master); \
	ret = master->method(master, __VA_ARGS__); \
	mtd_end_access(master);

I don't really like the helper names, so feel free to propose something
else.
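
With helpers like these, the accessors shrink to something like
(sketch, mtd_block_isbad() shown, assuming start/end_access take the
master as discussed above):

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	/* expands to mtd_start_access(); ret = ...; mtd_end_access(); */
	mtd_no_suspend_ret_call(ret, master, _block_isbad,
				mtd_get_master_ofs(mtd, ofs));

	return ret;
}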

> +
> + return ret;
> }
>