2016-04-05 14:56:15

by Matias Bjørling

Subject: [PATCH 0/5] Fixes to LightNVM

A couple of patches that add error handling, introduce a few convenient
helper variables, and prepare for the upcoming persistent block
management information work and the pblk target.

Matias Bjørling (5):
lightnvm: handle submit_io failure
lightnvm: implement nvm_submit_ppa_list
lightnvm: add fpg_size and pfpg_size to struct nvm_dev
lightnvm: move block fold outside of get_bb_tbl()
lightnvm: avoid memory leak when lun_map kcalloc fails

drivers/lightnvm/core.c | 175 +++++++++++++++++++++++++++++++++----------
drivers/lightnvm/gennvm.c | 14 ++--
drivers/lightnvm/sysblk.c | 46 +++++++-----
drivers/nvme/host/lightnvm.c | 47 ++----------
include/linux/lightnvm.h | 10 ++-
5 files changed, 186 insertions(+), 106 deletions(-)

--
2.1.4


2016-04-05 14:56:21

by Matias Bjørling

Subject: [PATCH 3/5] lightnvm: add fpg_size and pfpg_size to struct nvm_dev

The flash page size (fpg) and the size across planes (pfpg) are
convenient to know when sizing buffers. These values have previously
been calculated in various places. Replace the open-coded calculations
with the pre-calculated values.
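
For illustration, a minimal sketch of what call sites can now do
(hypothetical caller, not part of this patch):

        /* before: the per-plane flash page size was open-coded */
        int bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;

        /* after: use the value pre-calculated in nvm_core_init() */
        void *buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;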

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 2 ++
drivers/lightnvm/sysblk.c | 17 +++++++----------
include/linux/lightnvm.h | 2 ++
3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index f4e04a5..652b8c7 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -476,6 +476,8 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->pgs_per_blk = grp->num_pg;
dev->blks_per_lun = grp->num_blk;
dev->nr_planes = grp->num_pln;
+ dev->fpg_size = grp->fpg_sz;
+ dev->pfpg_size = grp->fpg_sz * grp->num_pln;
dev->sec_size = grp->csecs;
dev->oob_size = grp->sos;
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 321de1f..8835d89 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -154,13 +154,12 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
struct nvm_system_block *cur;
- int pg, cursz, ret, found = 0;
+ int pg, ret, found = 0;

/* the full buffer for a flash page is allocated. Only the first of it
* contains the system block information
*/
- cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- cur = kmalloc(cursz, GFP_KERNEL);
+ cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;

@@ -169,7 +168,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);

ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, cursz);
+ cur, dev->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -272,14 +271,12 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
{
struct nvm_system_block nvmsb;
void *buf;
- int i, sect, ret, bufsz;
+ int i, sect, ret;
struct ppa_addr *ppas;

nvm_cpu_to_sysblk(&nvmsb, info);

- /* buffer for flash page */
- bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- buf = kzalloc(bufsz, GFP_KERNEL);
+ buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
@@ -309,7 +306,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}

ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -319,7 +316,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}

ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 38814e2..f7c607f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -323,6 +323,8 @@ struct nvm_dev {
int sec_per_pg; /* only sectors for a single page */
int pgs_per_blk;
int blks_per_lun;
+ int fpg_size;
+ int pfpg_size; /* size of buffer if all pages are to be read */
int sec_size;
int oob_size;
int mccap;
--
2.1.4

2016-04-05 14:56:19

by Matias Bjørling

Subject: [PATCH 2/5] lightnvm: implement nvm_submit_ppa_list

The nvm_submit_ppa function assumes that users manage all plane
blocks as a single block. Extend the API with nvm_submit_ppa_list
to allow the user to send its own ppa list. If the user submits more
than a single PPA, the user must take care to allocate and free
the corresponding ppa list.
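
As a rough usage sketch (hypothetical caller; buf and len stand for an
already-allocated data buffer), the caller owns the ppa list whenever
more than one PPA is submitted:

        struct ppa_addr *ppas;
        int nr_ppas = dev->sec_per_pg;
        int ret;

        /* the caller allocates and owns the ppa list ... */
        ppas = kcalloc(nr_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas)
                return -ENOMEM;

        /* ... fills in ppas[0..nr_ppas - 1] (elided) ... */

        ret = nvm_submit_ppa_list(dev, ppas, nr_ppas, NVM_OP_PREAD,
                                        NVM_IO_SLC_MODE, buf, len);

        /* ... and frees it again once the synchronous call returns */
        kfree(ppas);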

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 88 +++++++++++++++++++++++++++++++++++++-----------
include/linux/lightnvm.h | 2 ++
2 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c2ef53a..f4e04a5 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -322,11 +322,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}

-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
- int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+ int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
- struct nvm_rq rqd;
struct bio *bio;
int ret;
unsigned long hang_check;
@@ -335,24 +334,17 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
if (IS_ERR_OR_NULL(bio))
return -ENOMEM;

- memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
- if (ret) {
- bio_put(bio);
- return ret;
- }
+ nvm_generic_to_addr_mode(dev, rqd);

- rqd.opcode = opcode;
- rqd.bio = bio;
- rqd.wait = &wait;
- rqd.dev = dev;
- rqd.end_io = nvm_end_io_sync;
- rqd.flags = flags;
- nvm_generic_to_addr_mode(dev, &rqd);
+ rqd->dev = dev;
+ rqd->opcode = opcode;
+ rqd->flags = flags;
+ rqd->bio = bio;
+ rqd->wait = &wait;
+ rqd->end_io = nvm_end_io_sync;

- ret = dev->ops->submit_io(dev, &rqd);
+ ret = dev->ops->submit_io(dev, rqd);
if (ret) {
- nvm_free_rqd_ppalist(dev, &rqd);
bio_put(bio);
return ret;
}
@@ -364,9 +356,67 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
else
wait_for_completion_io(&wait);

+ return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
+ * take care to free the ppa list if necessary.
+ * @dev: device
+ * @ppa_list: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+ int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+
+ if (dev->ops->max_phys_sect < nr_ppas)
+ return -EINVAL;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rqd.nr_pages = nr_ppas;
+ if (nr_ppas > 1)
+ rqd.ppa_list = ppa_list;
+ else
+ rqd.ppa_addr = ppa_list[0];
+
+ return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
+ * as single, dual, quad plane PPAs depending on device type.
+ * @dev: device
+ * @ppa: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+ int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ if (ret)
+ return ret;
+
+ ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
nvm_free_rqd_ppalist(dev, &rqd);

- return rqd.error;
+ return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);

diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index cdcb2cc..38814e2 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -534,6 +534,8 @@ extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);
+extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
+ int, void *, int);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
--
2.1.4

2016-04-05 14:56:38

by Matias Bjørling

Subject: [PATCH 5/5] lightnvm: avoid memory leak when lun_map kcalloc fails

A memory leak occurs if the lower page table has been initialized and
the subsequent dev->lun_map allocation fails.

Rearrange the initialization of the lower page table so that a
dev->lun_map allocation failure is handled gracefully, without leaking
memory.
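
The shape of the fix, reduced to the SLC case (a simplified sketch of
nvm_core_init(), not the literal hunk below):

        dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
                                sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;         /* nothing else allocated yet */

        if (nvm_init_slc_tbl(dev, grp)) {
                ret = -ENOMEM;
                goto err_fmtype;        /* unwind the bitmap allocated above */
        }

        return 0;

err_fmtype:
        kfree(dev->lun_map);
        return ret;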

Reported-by: Dan Carpenter <[email protected]>
Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 51 ++++++++++++++++++++++++++++---------------------
1 file changed, 29 insertions(+), 22 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 4cadbe0..fd5a9f3 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -504,6 +504,7 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ int ret;

/* device values */
dev->nr_chnls = grp->num_ch;
@@ -522,33 +523,16 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->plane_mode = NVM_PLANE_SINGLE;
dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp))
- return -ENOMEM;
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp))
- return -ENOMEM;
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- return -EINVAL;
- }
-
- if (!dev->lps_per_blk)
- pr_info("nvm: lower page programming table missing\n");
-
if (grp->mpos & 0x020202)
dev->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
dev->plane_mode = NVM_PLANE_QUAD;

+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* calculated values */
dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
@@ -560,11 +544,34 @@ static int nvm_core_init(struct nvm_dev *dev)
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
+
+ switch (grp->fmtype) {
+ case NVM_ID_FMTYPE_SLC:
+ if (nvm_init_slc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ case NVM_ID_FMTYPE_MLC:
+ if (nvm_init_mlc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ default:
+ pr_err("nvm: flash type not supported\n");
+ ret = -EINVAL;
+ goto err_fmtype;
+ }
+
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);

return 0;
+err_fmtype:
+ kfree(dev->lun_map);
+ return ret;
}

static void nvm_free(struct nvm_dev *dev)
--
2.1.4

2016-04-05 14:56:58

by Matias Bjørling

Subject: [PATCH 4/5] lightnvm: move block fold outside of get_bb_tbl()

The get block table command returns a list of blocks and planes
with their associated state. Users, such as gennvm and sysblk,
manage all planes as a single virtual block.

It was therefore natural to fold the bad block list before it is
returned. However, to also allow users that manage blocks on a
per-plane level to use the interface, the get_bb_tbl interface is
changed to not fold by default and to instead let the caller fold
when necessary.
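
In sketch form, a user that still wants one state per virtual block now
folds inside its own callback (my_block_bb is a hypothetical callback
mirroring the gennvm/sysblk changes below):

        static int my_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks, void *private)
        {
                int i;

                /* collapse per-plane states into one per virtual block */
                nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
                if (nr_blks < 0)
                        return nr_blks;

                for (i = 0; i < nr_blks; i++) {
                        if (!(blks[i] & (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)))
                                continue;
                        /* ... mark virtual block i as bad (elided) ... */
                }

                return 0;
        }

        /* call site, as in the lun-walking code below */
        ret = dev->ops->get_bb_tbl(dev, dev_ppa, my_block_bb, priv);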

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 35 +++++++++++++++++++++++++++++++++
drivers/lightnvm/gennvm.c | 14 +++++++------
drivers/lightnvm/sysblk.c | 29 ++++++++++++++++++---------
drivers/nvme/host/lightnvm.c | 47 ++++++--------------------------------------
include/linux/lightnvm.h | 6 ++++--
5 files changed, 73 insertions(+), 58 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 652b8c7..4cadbe0 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -420,6 +420,41 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
}
EXPORT_SYMBOL(nvm_submit_ppa);

+/*
+ * folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and reduced size is
+ * returned.
+ *
+ * If any of the plane blocks are bad or grown bad, the virtual block is
+ * marked bad. If not bad, the first plane state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+ int blk, offset, pl, blktype;
+
+ if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ return -EINVAL;
+
+ for (blk = 0; blk < dev->blks_per_lun; blk++) {
+ offset = blk * dev->plane_mode;
+ blktype = blks[offset];
+
+ /* Bad blocks on any planes take precedence over other types */
+ for (pl = 0; pl < dev->plane_mode; pl++) {
+ if (blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = blks[offset + pl];
+ break;
+ }
+ }
+
+ blks[blk] = blktype;
+ }
+
+ return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
int i;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 72e124a..6096077 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,18 +129,21 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}

-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
- void *private)
+static int gennvm_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct gen_nvm *gn = private;
- struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk;
int i;

+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

- for (i = 0; i < nr_blocks; i++) {
+ for (i = 0; i < nr_blks; i++) {
if (blks[i] == 0)
continue;

@@ -250,8 +253,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ppa = generic_to_dev_addr(dev, ppa);

ret = dev->ops->get_bb_tbl(dev, ppa,
- dev->blks_per_lun,
- gennvm_block_bb, gn);
+ gennvm_block_bb, gn);
if (ret)
pr_err("gennvm: could not read BB table\n");
}
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 8835d89..fcd16d8 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,12 +93,16 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
int i, nr_sysblk = 0;

+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] != NVM_BLK_T_HOST)
continue;
@@ -130,7 +134,7 @@ static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;

- ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+ ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
@@ -235,13 +239,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
return 0;
}

-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
struct ppa_addr *sppa;
int i, blkid = 0;

+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] == NVM_BLK_T_HOST)
return -EEXIST;
@@ -578,13 +586,16 @@ static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
BITS_PER_LONG;
}

-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, void *private)
{
struct factory_blks *f = private;
- struct nvm_dev *dev = f->dev;
int i, lunoff;

+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);

/* non-set bits correspond to the block must be erased */
@@ -661,7 +672,7 @@ static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,

dev_ppa = generic_to_dev_addr(dev, ppa);

- ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
+ ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd6..d289980 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -387,41 +387,16 @@ out:
return ret;
}

-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
- int nr_dst_blks, u8 *dst_blks,
- int nr_src_blks, u8 *src_blks)
-{
- int blk, offset, pl, blktype;
-
- for (blk = 0; blk < nr_dst_blks; blk++) {
- offset = blk * nvmdev->plane_mode;
- blktype = src_blks[offset];
-
- /* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < nvmdev->plane_mode; pl++) {
- if (src_blks[offset + pl] &
- (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
- blktype = src_blks[offset + pl];
- break;
- }
- }
-
- dst_blks[blk] = blktype;
- }
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
- void *priv)
+ nvm_bb_update_fn *update_bbtbl, void *priv)
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- u8 *dst_blks = NULL;
- int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+ int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;

c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,12 +407,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;

- dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
- if (!dst_blks) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
@@ -459,21 +428,17 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}

- if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
ret = -EINVAL;
dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+ le32_to_cpu(bb_tbl->tblks), nr_blks);
goto out;
}

- nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
- nr_src_blks, bb_tbl->blk);
-
ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+ ret = update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv);

out:
- kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index f7c607f..dacaa28 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -41,11 +41,12 @@ struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_bb_update_fn)(struct nvm_dev *, struct ppa_addr, u8 *, int,
+ void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr,
nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -538,6 +539,7 @@ extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
int, void *, int);
+extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
--
2.1.4

2016-04-05 14:57:54

by Matias Bjørling

Subject: [PATCH 1/5] lightnvm: handle submit_io failure

The device ->submit_io() callback might fail to submit I/O to the
device. In that case, the nvm_submit_ppa function should not wait for a
completion that will never arrive. Instead, return the ->submit_io()
error.
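
Condensed, the change amounts to the following (it matches the hunk
below); without the check, nvm_end_io_sync() would never run and the
wait would hang:

        ret = dev->ops->submit_io(dev, &rqd);
        if (ret) {
                /* the request never reached the device; clean up and bail */
                nvm_free_rqd_ppalist(dev, &rqd);
                bio_put(bio);
                return ret;
        }

        /* only wait when the I/O was actually submitted */
        wait_for_completion_io(&wait);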

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80..c2ef53a 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -351,6 +351,11 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
nvm_generic_to_addr_mode(dev, &rqd);

ret = dev->ops->submit_io(dev, &rqd);
+ if (ret) {
+ nvm_free_rqd_ppalist(dev, &rqd);
+ bio_put(bio);
+ return ret;
+ }

/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
--
2.1.4

2016-04-05 15:04:10

by Johannes Thumshirn

Subject: Re: [PATCH 1/5] lightnvm: handle submit_io failure

On 2016-04-05 16:55, Matias Bjørling wrote:
> The device ->submit_io() callback might fail to submit I/O to the
> device. In that case, the nvm_submit_ppa function should not wait for a
> completion that will never arrive. Instead, return the ->submit_io()
> error.
>
> Signed-off-by: Matias Bjørling <[email protected]>

Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>

2016-04-05 15:07:18

by Johannes Thumshirn

Subject: Re: [PATCH 2/5] lightnvm: implement nvm_submit_ppa_list

On 2016-04-05 16:55, Matias Bjørling wrote:
> The nvm_submit_ppa function assumes that users manage all plane
> blocks as a single block. Extend the API with nvm_submit_ppa_list
> to allow the user to send its own ppa list. If the user submits more
> than a single PPA, the user must take care to allocate and free
> the corresponding ppa list.
>
> Signed-off-by: Matias Bjørling <[email protected]>

Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>

2016-04-05 15:09:36

by Johannes Thumshirn

Subject: Re: [PATCH 3/5] lightnvm: add fpg_size and pfpg_size to struct nvm_dev

On 2016-04-05 16:55, Matias Bjørling wrote:
> The flash page size (fpg) and the size across planes (pfpg) are
> convenient to know when sizing buffers. These values have previously
> been calculated in various places. Replace the open-coded calculations
> with the pre-calculated values.
>
> Signed-off-by: Matias Bjørling <[email protected]>

Reviewed-by: Johannes Thumshirn <[email protected]>

2016-04-05 15:12:49

by Johannes Thumshirn

Subject: Re: [PATCH 4/5] lightnvm: move block fold outside of get_bb_tbl()

On 2016-04-05 16:55, Matias Bjørling wrote:
> The get block table command returns a list of blocks and planes
> with their associated state. Users, such as gennvm and sysblk,
> manage all planes as a single virtual block.
>
> It was therefore natural to fold the bad block list before it is
> returned. However, to also allow users that manage blocks on a
> per-plane level to use the interface, the get_bb_tbl interface is
> changed to not fold by default and to instead let the caller fold
> when necessary.
>
> Signed-off-by: Matias Bjørling <[email protected]>

Reviewed-by: Johannes Thumshirn <[email protected]>

2016-04-05 15:14:08

by Johannes Thumshirn

Subject: Re: [PATCH 5/5] lightnvm: avoid memory leak when lun_map kcalloc fails

On 2016-04-05 16:55, Matias Bjørling wrote:
> A memory leak occurs if the lower page table has been initialized and
> the subsequent dev->lun_map allocation fails.
>
> Rearrange the initialization of the lower page table so that a
> dev->lun_map allocation failure is handled gracefully, without leaking
> memory.
>
> Reported-by: Dan Carpenter <[email protected]>
> Signed-off-by: Matias Bjørling <[email protected]>

Reviewed-by: Johannes Thumshirn <[email protected]>