2016-04-26 10:31:26

by Matias Bjørling

Subject: [PATCH 0/5] LightNVM fixes

Hi,

A couple of patches for the 4.7 cycle. The first three are cleanups: a
macro for iterating ppas, a better get_bb_tbl interface, and the removal
of struct factory_blks. The fourth patch enables the caller to control
whether a mapping should cover a single plane or multiple planes. The
last patch fixes a bug when unfolding a virtual block onto multiple die
planes.

Matias Bjørling (5):
lightnvm: introduce nvm_for_each_lun_ppa() macro
lightnvm: refactor device ops->get_bb_tbl()
lightnvm: remove struct factory_blks
lightnvm: make nvm_set_rqd_ppalist() aware of vblks
lightnvm: set block priority in nvm_set_rqd_ppalist()

drivers/lightnvm/core.c | 40 ++++---
drivers/lightnvm/gennvm.c | 29 +++--
drivers/lightnvm/sysblk.c | 248 +++++++++++++++++++++----------------------
drivers/nvme/host/lightnvm.c | 6 +-
include/linux/lightnvm.h | 15 ++-
5 files changed, 179 insertions(+), 159 deletions(-)

--
2.1.4


2016-04-26 10:31:30

by Matias Bjørling

Subject: [PATCH 2/5] lightnvm: refactor device ops->get_bb_tbl()

The device ops->get_bb_tbl() interface takes a callback, which lets the
caller update its own data structures when the bad block table is
returned.

This makes it difficult to pass parameters to the callback, and it is
usually worked around with small private structures that carry both the
caller's state and any flags needed to perform the update.

Refactor ops->get_bb_tbl() to fill a data buffer with the status of the
returned blocks, and let the caller invoke its update function directly.
The caller can then supply the necessary flags and data structures
itself, simplifying the logic around ops->get_bb_tbl().
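
For illustration, a rough sketch of the interface change; the caller-side
names below (caller_update, state, flags) are hypothetical and not part
of the patch:

    /* Before: the driver op invoked a caller-supplied callback, and any
     * extra state had to travel through an opaque private pointer.
     */
    static int caller_update(struct nvm_dev *dev, struct ppa_addr ppa,
                             u8 *blks, int nr_blks, void *private)
    {
            struct caller_state *s = private;   /* flags hidden in here */

            /* update the caller's data structures using s->flags */
            return 0;
    }

    ret = dev->ops->get_bb_tbl(dev, dppa, caller_update, &state);

    /* After: the op only fills a buffer; the caller invokes its update
     * function directly, so flags can be normal parameters instead of
     * being smuggled through a void *private.
     */
    ret = nvm_get_bb_tbl(dev, ppa, blks);
    if (!ret)
            ret = caller_update(dev, ppa, blks, nr_blks, flags);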

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 8 +++
drivers/lightnvm/gennvm.c | 29 ++++++---
drivers/lightnvm/sysblk.c | 146 +++++++++++++++++++++++--------------------
drivers/nvme/host/lightnvm.c | 6 +-
include/linux/lightnvm.h | 6 +-
5 files changed, 111 insertions(+), 84 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0296223..e6d7a98 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -467,6 +467,14 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

+int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
+{
+ ppa = generic_to_dev_addr(dev, ppa);
+
+ return dev->ops->get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_bb_tbl);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
int i;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 6096077..9c6b141 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,10 +129,10 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}

-static int gennvm_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks, void *private)
+static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+ u8 *blks, int nr_blks)
{
- struct gen_nvm *gn = private;
+ struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk;
int i;
@@ -219,13 +219,21 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
struct gen_lun *lun;
struct nvm_block *block;
sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret;
+ int ret, nr_blks;
+ u8 *blks;
+
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;

gennvm_for_each_lun(gn, lun, lun_iter) {
lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
dev->blks_per_lun);
- if (!lun->vlun.blocks)
+ if (!lun->vlun.blocks) {
+ kfree(blks);
return -ENOMEM;
+ }

for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
block = &lun->vlun.blocks[blk_iter];
@@ -250,12 +258,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ppa.ppa = 0;
ppa.g.ch = lun->vlun.chnl_id;
ppa.g.lun = lun->vlun.id;
- ppa = generic_to_dev_addr(dev, ppa);

- ret = dev->ops->get_bb_tbl(dev, ppa,
- gennvm_block_bb, gn);
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
- pr_err("gennvm: could not read BB table\n");
+ pr_err("gennvm: could not get BB table\n");
+
+ ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
+ if (ret)
+ pr_err("gennvm: BB table map failed\n");
}
}

@@ -268,6 +278,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
}

+ kfree(blks);
return 0;
}

diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a48093d..dc99c0a 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,10 +93,45 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
+{
+ struct ppa_addr *sppa;
+ int i, blkid = 0;
+
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_HOST)
+ return -EEXIST;
+
+ if (blks[i] != NVM_BLK_T_FREE)
+ continue;
+
+ sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+ sppa->g.ch = ppa.g.ch;
+ sppa->g.lun = ppa.g.lun;
+ sppa->g.blk = i;
+ s->nr_ppas++;
+ blkid++;
+
+ pr_debug("nvm: use (%u %u %u) as sysblk\n",
+ sppa->g.ch, sppa->g.lun, sppa->g.blk);
+ if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+ return 0;
+ }
+
+ pr_err("nvm: sysblk failed get sysblk\n");
+ return -EINVAL;
+}
+
static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks, void *private)
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
{
- struct sysblk_scan *s = private;
int i, nr_sysblk = 0;

nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
@@ -123,26 +158,42 @@ static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
}

static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
- struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+ struct ppa_addr *ppas, int get_free)
{
- struct ppa_addr dppa;
- int i, ret = 0;
+ int i, nr_blks, ret = 0;
+ u8 *blks;

s->nr_ppas = 0;
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;

for (i = 0; i < s->nr_rows; i++) {
- dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;

- ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
+ ret = nvm_get_bb_tbl(dev, ppas[i], blks);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
ppas[i].g.blk);
- return ret;
+ goto err_get;
}
+
+ if (get_free)
+ ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
+ s);
+ else
+ ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
+ s);
+
+ if (ret)
+ goto err_get;
}

+err_get:
+ kfree(blks);
return ret;
}

@@ -239,41 +290,6 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
return 0;
}

-static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks, void *private)
-{
- struct sysblk_scan *s = private;
- struct ppa_addr *sppa;
- int i, blkid = 0;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_HOST)
- return -EEXIST;
-
- if (blks[i] != NVM_BLK_T_FREE)
- continue;
-
- sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
- sppa->g.ch = ppa.g.ch;
- sppa->g.lun = ppa.g.lun;
- sppa->g.blk = i;
- s->nr_ppas++;
- blkid++;
-
- pr_debug("nvm: use (%u %u %u) as sysblk\n",
- sppa->g.ch, sppa->g.lun, sppa->g.blk);
- if (blkid > MAX_BLKS_PR_SYSBLK - 1)
- return 0;
- }
-
- pr_err("nvm: sysblk failed get sysblk\n");
- return -EINVAL;
-}
-
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
@@ -393,7 +409,7 @@ int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;

@@ -453,7 +469,7 @@ int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;

@@ -551,7 +567,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
if (ret)
goto err_mark;

@@ -587,9 +603,9 @@ static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
}

static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks, void *private)
+ u8 *blks, int nr_blks,
+ struct factory_blks *f)
{
- struct factory_blks *f = private;
int i, lunoff;

nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
@@ -659,32 +675,29 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
return ppa_cnt;
}

-static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
- nvm_bb_update_fn *fn, void *priv)
-{
- struct ppa_addr dev_ppa;
- int ret;
-
- dev_ppa = generic_to_dev_addr(dev, ppa);
-
- ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
- if (ret)
- pr_err("nvm: failed bb tbl for ch%u lun%u\n",
- ppa.g.ch, ppa.g.blk);
- return ret;
-}
-
static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
{
- int ch, lun, ret;
struct ppa_addr ppa;
+ int ch, lun, nr_blks, ret;
+ u8 *blks;
+
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;

nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
- ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks, f);
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
+ if (ret)
+ pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+ ppa.g.ch, ppa.g.blk);
+
+ ret = nvm_factory_blks(dev, ppa, blks, nr_blks, f);
if (ret)
return ret;
}

+ kfree(blks);
return 0;
}

@@ -722,8 +735,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
- sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index d289980..45e3511 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -388,7 +388,7 @@ out:
}

static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- nvm_bb_update_fn *update_bbtbl, void *priv)
+ u8 *blks)
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
@@ -435,9 +435,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}

- ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv);
-
+ memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
out:
kfree(bb_tbl);
return ret;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 3f25635..16d4f2e 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -41,13 +41,10 @@ struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct nvm_dev *, struct ppa_addr, u8 *, int,
- void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr,
- nvm_bb_update_fn *, void *);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -539,6 +536,7 @@ extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
int, void *, int);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
+extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
--
2.1.4

2016-04-26 10:31:46

by Matias Bjørling

Subject: [PATCH 4/5] lightnvm: make nvm_set_rqd_ppalist() aware of vblks

A virtual block enables a single logical block to identify multiple
physical blocks. This is useful for metadata on device media that
support multiple planes. In that case, a block spanning multiple planes
can be managed as a single vblk, reducing the required metadata to a
fourth on four-plane media.

nvm_set_rqd_ppalist() takes care of expanding a ppa_list with vblks
automatically. However, for some use cases where only a single physical
block is required, the ppa_list should not be expanded.

Therefore, add a vblk parameter to nvm_set_rqd_ppalist(), and only
expand the ppa_list if vblk is set.
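
As a sketch of how the parameter reads at call sites (the vblk=1 form
appears in the diff below; vblk=0 is the new single-block mode):

    /* vblk = 1: expand each block address across all planes */
    ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);

    /* vblk = 0: address single physical blocks; ppa_list is used as-is */
    ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 0);

On a device with plane_mode = 4, the first call expands one ppa into four
ppa_list entries, one per plane, while the second keeps a single entry.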

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 31 +++++++++++++++++--------------
drivers/lightnvm/sysblk.c | 2 +-
include/linux/lightnvm.h | 2 +-
3 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index e6d7a98..de5db7b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -251,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas)
+ struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;

- if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
- rqd->nr_pages = 1;
+ if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ rqd->nr_pages = nr_ppas;
rqd->ppa_addr = ppas[0];

return 0;
}

- plane_cnt = dev->plane_mode;
- rqd->nr_pages = plane_cnt * nr_ppas;
-
- if (dev->ops->max_phys_sect < rqd->nr_pages)
- return -EINVAL;
-
+ rqd->nr_pages = nr_ppas;
rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("nvm: failed to allocate dma memory\n");
return -ENOMEM;
}

- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ if (!vblk) {
+ for (i = 0; i < nr_ppas; i++)
+ rqd->ppa_list[i] = ppas[i];
+ } else {
+ plane_cnt = dev->plane_mode;
+ rqd->nr_pages *= plane_cnt;
+
for (i = 0; i < nr_ppas; i++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppas[i].g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ }
}
}

@@ -304,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)

memset(&rqd, 0, sizeof(struct nvm_rq));

- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
if (ret)
return ret;

@@ -420,7 +423,7 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
int ret;

memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
if (ret)
return ret;

diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index bca6902..737fbc3 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -277,7 +277,7 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)

memset(&rqd, 0, sizeof(struct nvm_rq));

- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+ nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
nvm_generic_to_addr_mode(dev, &rqd);

ret = dev->ops->set_bb_tbl(dev, &rqd, type);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 16d4f2e..9ae0b7c 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -526,7 +526,7 @@ extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
- struct ppa_addr *, int);
+ struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
--
2.1.4

2016-04-26 10:31:44

by Matias Bjørling

Subject: [PATCH 3/5] lightnvm: remove struct factory_blks

Now that device ops->get_bb_tbl() no longer uses a callback, the
struct factory_blks can be removed.
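
In rough terms (a sketch condensed from the diff below), the state that
used to be bundled in the struct is now passed as plain parameters:

    /* Before: flags and bitmap travel inside a private struct */
    struct factory_blks f = {
            .dev = dev, .flags = flags, .blks = blk_bitmap,
    };
    nvm_factory_blks(dev, ppa, blks, nr_blks, &f);

    /* After: pass the bitmap and flags directly */
    nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap, flags);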

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/sysblk.c | 62 +++++++++++++++++++++--------------------------
1 file changed, 28 insertions(+), 34 deletions(-)

diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index dc99c0a..bca6902 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -582,29 +582,23 @@ err_mark:
return ret;
}

-struct factory_blks {
- struct nvm_dev *dev;
- int flags;
- unsigned long *blks;
-};
-
static int factory_nblks(int nblks)
{
/* Round up to nearest BITS_PER_LONG */
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

-static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
int nblks = factory_nblks(dev->blks_per_lun);

- return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+ return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}

static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
u8 *blks, int nr_blks,
- struct factory_blks *f)
+ unsigned long *blk_bitmap, int flags)
{
int i, lunoff;

@@ -612,25 +606,25 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
if (nr_blks < 0)
return nr_blks;

- lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+ lunoff = factory_blk_offset(dev, ppa);

/* non-set bits correspond to the block must be erased */
for (i = 0; i < nr_blks; i++) {
switch (blks[i]) {
case NVM_BLK_T_FREE:
- if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
- set_bit(i, &f->blks[lunoff]);
+ if (flags & NVM_FACTORY_ERASE_ONLY_USER)
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_HOST:
- if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_GRWN_BAD:
- if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
default:
- set_bit(i, &f->blks[lunoff]);
+ set_bit(i, &blk_bitmap[lunoff]);
break;
}
}
@@ -639,7 +633,7 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
}

static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
- int max_ppas, struct factory_blks *f)
+ int max_ppas, unsigned long *blk_bitmap)
{
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
@@ -648,8 +642,8 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
while (!done) {
done = 1;
nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
- idx = factory_blk_offset(dev, ch, lun);
- offset = &f->blks[idx];
+ idx = factory_blk_offset(dev, ppa);
+ offset = &blk_bitmap[idx];

blkid = find_first_zero_bit(offset,
dev->blks_per_lun);
@@ -675,10 +669,11 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
return ppa_cnt;
}

-static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
+static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
+ int flags)
{
struct ppa_addr ppa;
- int ch, lun, nr_blks, ret;
+ int ch, lun, nr_blks, ret = 0;
u8 *blks;

nr_blks = dev->blks_per_lun * dev->plane_mode;
@@ -692,43 +687,42 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);

- ret = nvm_factory_blks(dev, ppa, blks, nr_blks, f);
+ ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
+ flags);
if (ret)
- return ret;
+ break;
}

kfree(blks);
- return 0;
+ return ret;
}

int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
- struct factory_blks f;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
+ unsigned long *blk_bitmap;

- f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
GFP_KERNEL);
- if (!f.blks)
+ if (!blk_bitmap)
return ret;

ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas)
goto err_blks;

- f.dev = dev;
- f.flags = flags;
-
/* create list of blks to be erased */
- ret = nvm_fact_select_blks(dev, &f);
+ ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
if (ret)
goto err_ppas;

/* continue to erase until list of blks until empty */
- while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+ while ((ppa_cnt =
+ nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
nvm_erase_ppa(dev, ppas, ppa_cnt);

/* mark host reserved blocks free */
@@ -743,7 +737,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
err_ppas:
kfree(ppas);
err_blks:
- kfree(f.blks);
+ kfree(blk_bitmap);
return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
--
2.1.4

2016-04-26 10:31:43

by Matias Bjørling

Subject: [PATCH 5/5] lightnvm: set block priority in nvm_set_rqd_ppalist()

When a ppa_list with vblks is expanded, the entries can be ordered
either block-first or plane-first. Currently the expansion is
plane-first. This works well when a single vblk is expanded, but fails
for controllers that require the data for all planes of a block to be
grouped together. Make the ppa_list expand block-first, then
plane-first, to support this behavior.

Before (2 blocks, 4 planes per vblk):

  idx  block  plane
    0      0      0
    1      1      0
    2      0      1
    3      1      1
    4      0      2
    5      1      2
    6      0      3
    7      1      3

After:

  idx  block  plane
    0      0      0
    1      0      1
    2      0      2
    3      0      3
    4      1      0
    5      1      1
    6      1      2
    7      1      3
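
In code terms, the change is only the index computation when expanding
(both lines appear in the diff below):

    /* plane-first (before): all blocks at plane 0, then plane 1, ... */
    rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];

    /* block-first (after): all planes of block 0, then block 1, ... */
    rqd->ppa_list[(plane_cnt * i) + pl_idx] = ppas[i];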

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index de5db7b..0e82798 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -279,7 +279,8 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++) {
for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ rqd->ppa_list[(plane_cnt * i) + pl_idx] =
+ ppas[i];
}
}
}
--
2.1.4

2016-04-26 10:32:32

by Matias Bjørling

Subject: [PATCH 1/5] lightnvm: introduce nvm_for_each_lun_ppa() macro

Users that wish to iterate all luns on a device must create a struct
ppa_addr and separate iterators for channels and luns. Setting up the
iterators requires two nested loops, one to iterate channels and
another to iterate luns. This hurts readability.

Introduce nvm_for_each_lun_ppa(), which implements the nested loop and
sets the ppa, channel, and lun variables for each loop body,
eliminating the boilerplate code.
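
A minimal usage sketch (the loop body here is hypothetical):

    struct ppa_addr ppa;
    int ch, lun;

    nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
            /* ppa.g.ch == ch and ppa.g.lun == lun on every iteration */
            pr_debug("nvm: visiting lun (%u %u)\n", ppa.g.ch, ppa.g.lun);
    }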

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/sysblk.c | 56 +++++++++++++++++++----------------------------
include/linux/lightnvm.h | 7 ++++++
2 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 7fce588..a48093d 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -631,33 +631,28 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,

while (!done) {
done = 1;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- idx = factory_blk_offset(dev, ch, lun);
- offset = &f->blks[idx];
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ idx = factory_blk_offset(dev, ch, lun);
+ offset = &f->blks[idx];

- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
- continue;
- set_bit(blkid, offset);
+ blkid = find_first_zero_bit(offset,
+ dev->blks_per_lun);
+ if (blkid >= dev->blks_per_lun)
+ continue;
+ set_bit(blkid, offset);

- ppa.ppa = 0;
- ppa.g.ch = ch;
- ppa.g.lun = lun;
- ppa.g.blk = blkid;
- pr_debug("nvm: erase ppa (%u %u %u)\n",
- ppa.g.ch,
- ppa.g.lun,
- ppa.g.blk);
+ ppa.g.blk = blkid;
+ pr_debug("nvm: erase ppa (%u %u %u)\n",
+ ppa.g.ch,
+ ppa.g.lun,
+ ppa.g.blk);

- erase_list[ppa_cnt] = ppa;
- ppa_cnt++;
- done = 0;
+ erase_list[ppa_cnt] = ppa;
+ ppa_cnt++;
+ done = 0;

- if (ppa_cnt == max_ppas)
- return ppa_cnt;
- }
+ if (ppa_cnt == max_ppas)
+ return ppa_cnt;
}
}

@@ -684,17 +679,10 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
int ch, lun, ret;
struct ppa_addr ppa;

- ppa.ppa = 0;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- ppa.g.ch = ch;
- ppa.g.lun = lun;
-
- ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
- f);
- if (ret)
- return ret;
- }
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks, f);
+ if (ret)
+ return ret;
}

return 0;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5eabdba..3f25635 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -559,6 +559,13 @@ extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);
+
+#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \
+ for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \
+ (chid)++, (ppa).g.ch = (chid)) \
+ for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \
+ (lunid)++, (ppa).g.lun = (lunid))
+
#else /* CONFIG_NVM */
struct nvm_dev_ops;

--
2.1.4

2016-04-26 14:35:20

by Jens Axboe

[permalink] [raw]
Subject: Re: [PATCH 0/5] LightNVM fixes

On 04/26/2016 04:31 AM, Matias Bjørling wrote:
> Hi,
>
> A couple of patches for the 4.7 cycle. The first three are cleanups: a
> macro for iterating ppas, a better get_bb_tbl interface, and the removal
> of struct factory_blks. The fourth patch enables the caller to control
> whether a mapping should cover a single plane or multiple planes. The
> last patch fixes a bug when unfolding a virtual block onto multiple die
> planes.

What are these against? Patch #2 fails horribly on both for-4.7/drivers
and current Linus' master.

--
Jens Axboe

2016-04-26 14:57:39

by Matias Bjørling

[permalink] [raw]
Subject: Re: [PATCH 0/5] LightNVM fixes

On 04/26/2016 04:35 PM, Jens Axboe wrote:
> On 04/26/2016 04:31 AM, Matias Bjørling wrote:
>> Hi,
>>
>> A couple of patches for the 4.7 cycle. The first three are cleanups: a
>> macro for iterating ppas, a better get_bb_tbl interface, and the removal
>> of struct factory_blks. The fourth patch enables the caller to control
>> whether a mapping should cover a single plane or multiple planes. The
>> last patch fixes a bug when unfolding a virtual block onto multiple die
>> planes.
>
> What are these against? Patch #2 fails horribly on both for-4.7/drivers
> and current Linus' master.
>

Ah yes, forgot to add what they were based upon. They are based on the
https://github.com/OpenChannelSSD/linux.git for-next branch.

I wanted to post these for review before applying them. I will send you
a combined patch set around -rc6.