2018-02-05 12:17:34

by Matias Bjørling

Subject: [PATCH 0/4] lightnvm: base 2.0 implementation

Hi,

A couple of patches for 2.0 support for the lightnvm subsystem. They
form the basis for integrating 2.0 support.

For the rest of the support, Javier has code that implements report
chunk and sets up the LBA format data structure. He also has a bunch
of patches that bring pblk up to speed.

The first two patches are preparation for the 2.0 work. The third patch
implements the 2.0 data structures, the geometry command, and exposes
the sysfs attributes that come with the 2.0 specification. Note that
the attributes between 1.2 and 2.0 are different, and it is expected
that user-space shall use the version sysfs attribute to know which
attributes will be available.
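
As a rough illustration of that flow from user-space, something along
these lines (only a sketch; the sysfs path assumes a disk named nvme0n1
and the "lightnvm" attribute group added in the third patch):

#include <stdio.h>

int main(void)
{
	/* Path is illustrative; substitute the actual nvme disk name. */
	FILE *f = fopen("/sys/block/nvme0n1/lightnvm/version", "r");
	unsigned int ver = 0;

	if (!f) {
		perror("open version");
		return 1;
	}
	if (fscanf(f, "%u", &ver) != 1) {
		fclose(f);
		fprintf(stderr, "could not parse version\n");
		return 1;
	}
	fclose(f);

	/* Pick the attribute set that matches the reported version. */
	if (ver == 1)
		printf("1.2 device: use num_channels, num_luns, ...\n");
	else if (ver == 2)
		printf("2.0 device: use groups, punits, chunks, clba, ...\n");
	else
		printf("unsupported OCSSD version %u\n", ver);

	return 0;
}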

The last patch implements support for using the nvme namespace logical
block and metadata fields and syncs them with the internal lightnvm
identify structures.

-Matias

Matias Bjørling (4):
lightnvm: make 1.2 data structures explicit
lightnvm: flatten nvm_id_group into nvm_id
lightnvm: add 2.0 geometry identification
nvme: lightnvm: add late setup of block size and metadata

drivers/lightnvm/core.c | 27 ++-
drivers/nvme/host/core.c | 2 +
drivers/nvme/host/lightnvm.c | 508 ++++++++++++++++++++++++++++++++-----------
drivers/nvme/host/nvme.h | 2 +
include/linux/lightnvm.h | 64 +++---
5 files changed, 426 insertions(+), 177 deletions(-)

--
2.11.0



2018-02-05 12:16:59

by Matias Bjørling

Subject: [PATCH 1/4] lightnvm: make 1.2 data structures explicit

Make the 1.2 data structures explicit, so it will be easy to identify
the 2.0 data structures. Also fix the order in which the nvme_nvm_*
structures are declared, so that they follow the nvme_nvm_command order.

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/nvme/host/lightnvm.c | 82 ++++++++++++++++++++++----------------------
1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index dc0b1335c7c6..60db3f1b59da 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -51,6 +51,21 @@ struct nvme_nvm_ph_rw {
__le64 resv;
};

+struct nvme_nvm_erase_blk {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd[2];
+ __le64 prp1;
+ __le64 prp2;
+ __le64 spba;
+ __le16 length;
+ __le16 control;
+ __le32 dsmgmt;
+ __le64 resv;
+};
+
struct nvme_nvm_identity {
__u8 opcode;
__u8 flags;
@@ -89,33 +104,18 @@ struct nvme_nvm_setbbtbl {
__u32 rsvd4[3];
};

-struct nvme_nvm_erase_blk {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd[2];
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 resv;
-};
-
struct nvme_nvm_command {
union {
struct nvme_common_command common;
- struct nvme_nvm_identity identity;
struct nvme_nvm_ph_rw ph_rw;
+ struct nvme_nvm_erase_blk erase;
+ struct nvme_nvm_identity identity;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
- struct nvme_nvm_erase_blk erase;
};
};

-struct nvme_nvm_id_group {
+struct nvme_nvm_id12_grp {
__u8 mtype;
__u8 fmtype;
__le16 res16;
@@ -141,7 +141,7 @@ struct nvme_nvm_id_group {
__u8 reserved[906];
} __packed;

-struct nvme_nvm_addr_format {
+struct nvme_nvm_id12_addrf {
__u8 ch_offset;
__u8 ch_len;
__u8 lun_offset;
@@ -157,16 +157,16 @@ struct nvme_nvm_addr_format {
__u8 res[4];
} __packed;

-struct nvme_nvm_id {
+struct nvme_nvm_id12 {
__u8 ver_id;
__u8 vmnt;
__u8 cgrps;
__u8 res;
__le32 cap;
__le32 dom;
- struct nvme_nvm_addr_format ppaf;
+ struct nvme_nvm_id12_addrf ppaf;
__u8 resv[228];
- struct nvme_nvm_id_group group;
+ struct nvme_nvm_id12_grp grp;
__u8 resv2[2880];
} __packed;

@@ -191,25 +191,25 @@ static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

-static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
+static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
- struct nvme_nvm_id_group *src;
+ struct nvme_nvm_id12_grp *src;
struct nvm_id_group *grp;
int sec_per_pg, sec_per_pl, pg_per_blk;

- if (nvme_nvm_id->cgrps != 1)
+ if (id12->cgrps != 1)
return -EINVAL;

- src = &nvme_nvm_id->group;
+ src = &id12->grp;
grp = &nvm_id->grp;

grp->mtype = src->mtype;
@@ -261,34 +261,34 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_id *nvme_nvm_id;
+ struct nvme_nvm_id12 *id;
struct nvme_nvm_command c = {};
int ret;

c.identity.opcode = nvme_nvm_admin_identity;
c.identity.nsid = cpu_to_le32(ns->head->ns_id);

- nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
- if (!nvme_nvm_id)
+ id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
+ if (!id)
return -ENOMEM;

ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
- nvme_nvm_id, sizeof(struct nvme_nvm_id));
+ id, sizeof(struct nvme_nvm_id12));
if (ret) {
ret = -EIO;
goto out;
}

- nvm_id->ver_id = nvme_nvm_id->ver_id;
- nvm_id->vmnt = nvme_nvm_id->vmnt;
- nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
- nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
- memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
+ nvm_id->ver_id = id->ver_id;
+ nvm_id->vmnt = id->vmnt;
+ nvm_id->cap = le32_to_cpu(id->cap);
+ nvm_id->dom = le32_to_cpu(id->dom);
+ memcpy(&nvm_id->ppaf, &id->ppaf,
sizeof(struct nvm_addr_format));

- ret = init_grps(nvm_id, nvme_nvm_id);
+ ret = init_grp(nvm_id, id);
out:
- kfree(nvme_nvm_id);
+ kfree(id);
return ret;
}

--
2.11.0


2018-02-05 12:17:39

by Matias Bjørling

Subject: [PATCH 3/4] lightnvm: add 2.0 geometry identification

Implement the geometry data structures for 2.0 and enable a drive
to be identified as one, including exposing the appropriate 2.0
sysfs entries.

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 2 +-
drivers/nvme/host/lightnvm.c | 334 +++++++++++++++++++++++++++++++++++++------
include/linux/lightnvm.h | 11 +-
3 files changed, 295 insertions(+), 52 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c72863b36439..250e74dfa120 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -934,7 +934,7 @@ static int nvm_init(struct nvm_dev *dev)
pr_debug("nvm: ver:%x nvm_vendor:%x\n",
dev->identity.ver_id, dev->identity.vmnt);

- if (dev->identity.ver_id != 1) {
+ if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) {
pr_err("nvm: device not supported by kernel.");
goto err;
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6412551ecc65..a9c010655ccc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -184,6 +184,58 @@ struct nvme_nvm_bb_tbl {
__u8 blk[0];
};

+struct nvme_nvm_id20_addrf {
+ __u8 grp_len;
+ __u8 pu_len;
+ __u8 chk_len;
+ __u8 lba_len;
+ __u8 resv[4];
+};
+
+struct nvme_nvm_id20 {
+ __u8 mjr;
+ __u8 mnr;
+ __u8 resv[6];
+
+ struct nvme_nvm_id20_addrf lbaf;
+
+ __u32 mccap;
+ __u8 resv2[12];
+
+ __u8 wit;
+ __u8 resv3[31];
+
+ /* Geometry */
+ __u16 num_grp;
+ __u16 num_pu;
+ __u32 num_chk;
+ __u32 clba;
+ __u8 resv4[52];
+
+ /* Write data requirements */
+ __u32 ws_min;
+ __u32 ws_opt;
+ __u32 mw_cunits;
+ __u32 maxoc;
+ __u32 maxocpu;
+ __u8 resv5[44];
+
+ /* Performance related metrics */
+ __u32 trdt;
+ __u32 trdm;
+ __u32 twrt;
+ __u32 twrm;
+ __u32 tcrst;
+ __u32 tcrsm;
+ __u8 resv6[40];
+
+ /* Reserved area */
+ __u8 resv7[2816];
+
+ /* Vendor specific */
+ __u8 vs[1024];
+};
+
/*
* Check we didn't inadvertently grow the command struct
*/
@@ -198,6 +250,8 @@ static inline void _nvme_nvm_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
+ BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
}

static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
@@ -256,6 +310,49 @@ static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
return 0;
}

+static int nvme_nvm_setup_12(struct nvm_dev *nvmdev, struct nvm_id *nvm_id,
+ struct nvme_nvm_id12 *id)
+{
+ nvm_id->ver_id = id->ver_id;
+ nvm_id->vmnt = id->vmnt;
+ nvm_id->cap = le32_to_cpu(id->cap);
+ nvm_id->dom = le32_to_cpu(id->dom);
+ memcpy(&nvm_id->ppaf, &id->ppaf,
+ sizeof(struct nvm_addr_format));
+
+ return init_grp(nvm_id, id);
+}
+
+static int nvme_nvm_setup_20(struct nvm_dev *nvmdev, struct nvm_id *nvm_id,
+ struct nvme_nvm_id20 *id)
+{
+ nvm_id->ver_id = id->mjr;
+
+ nvm_id->num_ch = le16_to_cpu(id->num_grp);
+ nvm_id->num_lun = le16_to_cpu(id->num_pu);
+ nvm_id->num_chk = le32_to_cpu(id->num_chk);
+ nvm_id->clba = le32_to_cpu(id->clba);
+
+ nvm_id->ws_min = le32_to_cpu(id->ws_min);
+ nvm_id->ws_opt = le32_to_cpu(id->ws_opt);
+ nvm_id->mw_cunits = le32_to_cpu(id->mw_cunits);
+
+ nvm_id->trdt = le32_to_cpu(id->trdt);
+ nvm_id->trdm = le32_to_cpu(id->trdm);
+ nvm_id->tprt = le32_to_cpu(id->twrt);
+ nvm_id->tprm = le32_to_cpu(id->twrm);
+ nvm_id->tbet = le32_to_cpu(id->tcrst);
+ nvm_id->tbem = le32_to_cpu(id->tcrsm);
+
+ /* calculated values */
+ nvm_id->ws_per_chk = nvm_id->clba / nvm_id->ws_min;
+
+ /* 1.2 compatibility */
+ nvm_id->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ return 0;
+}
+
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -277,14 +374,24 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
goto out;
}

- nvm_id->ver_id = id->ver_id;
- nvm_id->vmnt = id->vmnt;
- nvm_id->cap = le32_to_cpu(id->cap);
- nvm_id->dom = le32_to_cpu(id->dom);
- memcpy(&nvm_id->ppaf, &id->ppaf,
- sizeof(struct nvm_addr_format));
-
- ret = init_grp(nvm_id, id);
+ /*
+ * The 1.2 and 2.0 specifications share the first byte in their geometry
+ * command to make it possible to know what version a device implements.
+ */
+ switch (id->ver_id) {
+ case 1:
+ ret = nvme_nvm_setup_12(nvmdev, nvm_id, id);
+ break;
+ case 2:
+ ret = nvme_nvm_setup_20(nvmdev, nvm_id,
+ (struct nvme_nvm_id20 *)id);
+ break;
+ default:
+ dev_err(ns->ctrl->device,
+ "OCSSD revision not supported (%d)\n",
+ nvm_id->ver_id);
+ ret = -EINVAL;
+ }
out:
kfree(id);
return ret;
@@ -733,7 +840,7 @@ void nvme_nvm_unregister(struct nvme_ns *ns)
}

static ssize_t nvm_dev_attr_show(struct device *dev,
- struct device_attribute *dattr, char *page)
+ struct device_attribute *dattr, char *page)
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
struct nvm_dev *ndev = ns->ndev;
@@ -748,10 +855,36 @@ static ssize_t nvm_dev_attr_show(struct device *dev,

if (strcmp(attr->name, "version") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
- } else if (strcmp(attr->name, "vendor_opcode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
} else if (strcmp(attr->name, "capabilities") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+ } else if (strcmp(attr->name, "read_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt);
+ } else if (strcmp(attr->name, "read_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ attr->name);
+ }
+}
+
+static ssize_t nvm_dev_attr_show_12(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvm_dev *ndev = ns->ndev;
+ struct nvm_id *id;
+ struct attribute *attr;
+
+ if (!ndev)
+ return 0;
+
+ id = &ndev->identity;
+ attr = &dattr->attr;
+
+ if (strcmp(attr->name, "vendor_opcode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
} else if (strcmp(attr->name, "device_mode") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
/* kept for compatibility */
@@ -786,10 +919,6 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs);
} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
return scnprintf(page, PAGE_SIZE, "%u\n", id->sos);
- } else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt);
- } else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm);
} else if (strcmp(attr->name, "prog_typ") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt);
} else if (strcmp(attr->name, "prog_max") == 0) {
@@ -808,48 +937,99 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else {
return scnprintf(page,
PAGE_SIZE,
- "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ "Unhandled attr(%s) in `nvm_dev_attr_show_12`\n",
attr->name);
}
}

-#define NVM_DEV_ATTR_RO(_name) \
+static ssize_t nvm_dev_attr_show_20(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvm_dev *ndev = ns->ndev;
+ struct nvm_id *id;
+ struct attribute *attr;
+
+ if (!ndev)
+ return 0;
+
+ id = &ndev->identity;
+ attr = &dattr->attr;
+
+ if (strcmp(attr->name, "groups") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch);
+ } else if (strcmp(attr->name, "punits") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun);
+ } else if (strcmp(attr->name, "chunks") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk);
+ } else if (strcmp(attr->name, "clba") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->clba);
+ } else if (strcmp(attr->name, "ws_min") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_min);
+ } else if (strcmp(attr->name, "ws_opt") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_opt);
+ } else if (strcmp(attr->name, "mw_cunits") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->mw_cunits);
+ } else if (strcmp(attr->name, "write_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt);
+ } else if (strcmp(attr->name, "write_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm);
+ } else if (strcmp(attr->name, "reset_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet);
+ } else if (strcmp(attr->name, "reset_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show_20`\n",
+ attr->name);
+ }
+}
+
+#define NVM_DEV_ATTR_RO(_name) \
DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+#define NVM_DEV_ATTR_12_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
+#define NVM_DEV_ATTR_20_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

+/* general attributes */
static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
-static NVM_DEV_ATTR_RO(device_mode);
-static NVM_DEV_ATTR_RO(ppa_format);
-static NVM_DEV_ATTR_RO(media_manager);

-static NVM_DEV_ATTR_RO(media_type);
-static NVM_DEV_ATTR_RO(flash_media_type);
-static NVM_DEV_ATTR_RO(num_channels);
-static NVM_DEV_ATTR_RO(num_luns);
-static NVM_DEV_ATTR_RO(num_planes);
-static NVM_DEV_ATTR_RO(num_blocks);
-static NVM_DEV_ATTR_RO(num_pages);
-static NVM_DEV_ATTR_RO(page_size);
-static NVM_DEV_ATTR_RO(hw_sector_size);
-static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
-static NVM_DEV_ATTR_RO(prog_typ);
-static NVM_DEV_ATTR_RO(prog_max);
-static NVM_DEV_ATTR_RO(erase_typ);
-static NVM_DEV_ATTR_RO(erase_max);
-static NVM_DEV_ATTR_RO(multiplane_modes);
-static NVM_DEV_ATTR_RO(media_capabilities);
-static NVM_DEV_ATTR_RO(max_phys_secs);

-static struct attribute *nvm_dev_attrs[] = {
+/* 1.2 values */
+static NVM_DEV_ATTR_12_RO(vendor_opcode);
+static NVM_DEV_ATTR_12_RO(device_mode);
+static NVM_DEV_ATTR_12_RO(ppa_format);
+static NVM_DEV_ATTR_12_RO(media_manager);
+static NVM_DEV_ATTR_12_RO(media_type);
+static NVM_DEV_ATTR_12_RO(flash_media_type);
+static NVM_DEV_ATTR_12_RO(num_channels);
+static NVM_DEV_ATTR_12_RO(num_luns);
+static NVM_DEV_ATTR_12_RO(num_planes);
+static NVM_DEV_ATTR_12_RO(num_blocks);
+static NVM_DEV_ATTR_12_RO(num_pages);
+static NVM_DEV_ATTR_12_RO(page_size);
+static NVM_DEV_ATTR_12_RO(hw_sector_size);
+static NVM_DEV_ATTR_12_RO(oob_sector_size);
+static NVM_DEV_ATTR_12_RO(prog_typ);
+static NVM_DEV_ATTR_12_RO(prog_max);
+static NVM_DEV_ATTR_12_RO(erase_typ);
+static NVM_DEV_ATTR_12_RO(erase_max);
+static NVM_DEV_ATTR_12_RO(multiplane_modes);
+static NVM_DEV_ATTR_12_RO(media_capabilities);
+static NVM_DEV_ATTR_12_RO(max_phys_secs);
+
+static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_version.attr,
+ &dev_attr_capabilities.attr,
+
&dev_attr_vendor_opcode.attr,
- &dev_attr_capabilities.attr,
&dev_attr_device_mode.attr,
&dev_attr_media_manager.attr,
-
&dev_attr_ppa_format.attr,
&dev_attr_media_type.attr,
&dev_attr_flash_media_type.attr,
@@ -870,22 +1050,82 @@ static struct attribute *nvm_dev_attrs[] = {
&dev_attr_multiplane_modes.attr,
&dev_attr_media_capabilities.attr,
&dev_attr_max_phys_secs.attr,
+
+ NULL,
+};
+
+static const struct attribute_group nvm_dev_attr_group_12 = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs_12,
+};
+
+/* 2.0 values */
+static NVM_DEV_ATTR_20_RO(groups);
+static NVM_DEV_ATTR_20_RO(punits);
+static NVM_DEV_ATTR_20_RO(chunks);
+static NVM_DEV_ATTR_20_RO(clba);
+static NVM_DEV_ATTR_20_RO(ws_min);
+static NVM_DEV_ATTR_20_RO(ws_opt);
+static NVM_DEV_ATTR_20_RO(mw_cunits);
+static NVM_DEV_ATTR_20_RO(write_typ);
+static NVM_DEV_ATTR_20_RO(write_max);
+static NVM_DEV_ATTR_20_RO(reset_typ);
+static NVM_DEV_ATTR_20_RO(reset_max);
+
+static struct attribute *nvm_dev_attrs_20[] = {
+ &dev_attr_version.attr,
+ &dev_attr_capabilities.attr,
+
+ &dev_attr_groups.attr,
+ &dev_attr_punits.attr,
+ &dev_attr_chunks.attr,
+ &dev_attr_clba.attr,
+ &dev_attr_ws_min.attr,
+ &dev_attr_ws_opt.attr,
+ &dev_attr_mw_cunits.attr,
+
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ &dev_attr_write_typ.attr,
+ &dev_attr_write_max.attr,
+ &dev_attr_reset_typ.attr,
+ &dev_attr_reset_max.attr,
+
NULL,
};

-static const struct attribute_group nvm_dev_attr_group = {
+static const struct attribute_group nvm_dev_attr_group_20 = {
.name = "lightnvm",
- .attrs = nvm_dev_attrs,
+ .attrs = nvm_dev_attrs_20,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group);
+ if (!ns->ndev)
+ return -EINVAL;
+
+ switch (ns->ndev->identity.ver_id) {
+ case 1:
+ return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group_12);
+ case 2:
+ return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group_20);
+ }
+
+ return -EINVAL;
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group);
+ switch (ns->ndev->identity.ver_id) {
+ case 1:
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group_12);
+ break;
+ case 2:
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group_20);
+ break;
+ }
}
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 94b704a8d83d..b717c000b712 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -184,10 +184,9 @@ struct nvm_id {
u16 csecs;
u16 sos;

- u16 ws_min;
- u16 ws_opt;
- u16 ws_seq;
- u16 ws_per_chk;
+ u32 ws_min;
+ u32 ws_opt;
+ u32 mw_cunits;

u32 trdt;
u32 trdm;
@@ -199,6 +198,10 @@ struct nvm_id {
u32 mccap;
u16 cpar;

+ /* calculated values */
+ u16 ws_seq;
+ u16 ws_per_chk;
+
/* 1.2 compatibility */
u8 mtype;
u8 fmtype;
--
2.11.0


2018-02-05 12:18:16

by Matias Bjørling

Subject: [PATCH 4/4] nvme: lightnvm: add late setup of block size and metadata

The nvme driver sets up the size of the nvme namespace in two steps.
First it initializes the device with standard logical block and
metadata sizes, and then sets the correct logical block and metadata
size. Since the OCSSD 2.0 specification relies on the namespace to
expose these sizes for correct initialization, let them be updated
appropriately on the LightNVM side as well.

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/nvme/host/core.c | 2 ++
drivers/nvme/host/lightnvm.c | 8 ++++++++
drivers/nvme/host/nvme.h | 2 ++
3 files changed, 12 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d666cbd4..740ceb28067c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1379,6 +1379,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->noiob)
nvme_set_chunk_size(ns);
nvme_update_disk_info(disk, ns, id);
+ if (ns->ndev)
+ nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk)
nvme_update_disk_info(ns->head->disk, ns, id);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a9c010655ccc..8d4301854811 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -814,6 +814,14 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
}
}

+void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
+{
+ struct nvm_dev *ndev = ns->ndev;
+
+ ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift;
+ ndev->identity.sos = ndev->geo.oob_size = ns->ms;
+}
+
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
struct request_queue *q = ns->queue;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..1ca08f4993ba 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -451,12 +451,14 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
+void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
+static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
int node)
{
--
2.11.0


2018-02-05 12:18:38

by Matias Bjørling

Subject: [PATCH 2/4] lightnvm: flatten nvm_id_group into nvm_id

There are no groups in the 2.0 specification, so make sure that the
nvm_id structure is flattened before the 2.0 data structures are added.

Signed-off-by: Matias Bjørling <[email protected]>
---
drivers/lightnvm/core.c | 25 ++++++-----
drivers/nvme/host/lightnvm.c | 100 +++++++++++++++++++++----------------------
include/linux/lightnvm.h | 53 +++++++++++------------
3 files changed, 86 insertions(+), 92 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index dcc9e621e651..c72863b36439 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -851,33 +851,32 @@ EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
- struct nvm_id_group *grp = &id->grp;
struct nvm_geo *geo = &dev->geo;
int ret;

memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

- if (grp->mtype != 0) {
+ if (id->mtype != 0) {
pr_err("nvm: memory type not supported\n");
return -EINVAL;
}

/* Whole device values */
- geo->nr_chnls = grp->num_ch;
- geo->nr_luns = grp->num_lun;
+ geo->nr_chnls = id->num_ch;
+ geo->nr_luns = id->num_lun;

/* Generic device geometry values */
- geo->ws_min = grp->ws_min;
- geo->ws_opt = grp->ws_opt;
- geo->ws_seq = grp->ws_seq;
- geo->ws_per_chk = grp->ws_per_chk;
- geo->nr_chks = grp->num_chk;
- geo->sec_size = grp->csecs;
- geo->oob_size = grp->sos;
- geo->mccap = grp->mccap;
+ geo->ws_min = id->ws_min;
+ geo->ws_opt = id->ws_opt;
+ geo->ws_seq = id->ws_seq;
+ geo->ws_per_chk = id->ws_per_chk;
+ geo->nr_chks = id->num_chk;
+ geo->sec_size = id->csecs;
+ geo->oob_size = id->sos;
+ geo->mccap = id->mccap;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

- geo->sec_per_chk = grp->clba;
+ geo->sec_per_chk = id->clba;
geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
geo->all_luns = geo->nr_luns * geo->nr_chnls;

diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 60db3f1b59da..6412551ecc65 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -203,57 +203,55 @@ static inline void _nvme_nvm_check_size(void)
static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
struct nvme_nvm_id12_grp *src;
- struct nvm_id_group *grp;
int sec_per_pg, sec_per_pl, pg_per_blk;

if (id12->cgrps != 1)
return -EINVAL;

src = &id12->grp;
- grp = &nvm_id->grp;

- grp->mtype = src->mtype;
- grp->fmtype = src->fmtype;
+ nvm_id->mtype = src->mtype;
+ nvm_id->fmtype = src->fmtype;

- grp->num_ch = src->num_ch;
- grp->num_lun = src->num_lun;
+ nvm_id->num_ch = src->num_ch;
+ nvm_id->num_lun = src->num_lun;

- grp->num_chk = le16_to_cpu(src->num_chk);
- grp->csecs = le16_to_cpu(src->csecs);
- grp->sos = le16_to_cpu(src->sos);
+ nvm_id->num_chk = le16_to_cpu(src->num_chk);
+ nvm_id->csecs = le16_to_cpu(src->csecs);
+ nvm_id->sos = le16_to_cpu(src->sos);

pg_per_blk = le16_to_cpu(src->num_pg);
- sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / nvm_id->csecs;
sec_per_pl = sec_per_pg * src->num_pln;
- grp->clba = sec_per_pl * pg_per_blk;
- grp->ws_per_chk = pg_per_blk;
+ nvm_id->clba = sec_per_pl * pg_per_blk;
+ nvm_id->ws_per_chk = pg_per_blk;

- grp->mpos = le32_to_cpu(src->mpos);
- grp->cpar = le16_to_cpu(src->cpar);
- grp->mccap = le32_to_cpu(src->mccap);
+ nvm_id->mpos = le32_to_cpu(src->mpos);
+ nvm_id->cpar = le16_to_cpu(src->cpar);
+ nvm_id->mccap = le32_to_cpu(src->mccap);

- grp->ws_opt = grp->ws_min = sec_per_pg;
- grp->ws_seq = NVM_IO_SNGL_ACCESS;
+ nvm_id->ws_opt = nvm_id->ws_min = sec_per_pg;
+ nvm_id->ws_seq = NVM_IO_SNGL_ACCESS;

- if (grp->mpos & 0x020202) {
- grp->ws_seq = NVM_IO_DUAL_ACCESS;
- grp->ws_opt <<= 1;
- } else if (grp->mpos & 0x040404) {
- grp->ws_seq = NVM_IO_QUAD_ACCESS;
- grp->ws_opt <<= 2;
+ if (nvm_id->mpos & 0x020202) {
+ nvm_id->ws_seq = NVM_IO_DUAL_ACCESS;
+ nvm_id->ws_opt <<= 1;
+ } else if (nvm_id->mpos & 0x040404) {
+ nvm_id->ws_seq = NVM_IO_QUAD_ACCESS;
+ nvm_id->ws_opt <<= 2;
}

- grp->trdt = le32_to_cpu(src->trdt);
- grp->trdm = le32_to_cpu(src->trdm);
- grp->tprt = le32_to_cpu(src->tprt);
- grp->tprm = le32_to_cpu(src->tprm);
- grp->tbet = le32_to_cpu(src->tbet);
- grp->tbem = le32_to_cpu(src->tbem);
+ nvm_id->trdt = le32_to_cpu(src->trdt);
+ nvm_id->trdm = le32_to_cpu(src->trdm);
+ nvm_id->tprt = le32_to_cpu(src->tprt);
+ nvm_id->tprm = le32_to_cpu(src->tprm);
+ nvm_id->tbet = le32_to_cpu(src->tbet);
+ nvm_id->tbem = le32_to_cpu(src->tbem);

/* 1.2 compatibility */
- grp->num_pln = src->num_pln;
- grp->num_pg = le16_to_cpu(src->num_pg);
- grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+ nvm_id->num_pln = src->num_pln;
+ nvm_id->num_pg = le16_to_cpu(src->num_pg);
+ nvm_id->fpg_sz = le16_to_cpu(src->fpg_sz);

return 0;
}
@@ -740,14 +738,12 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
struct nvm_dev *ndev = ns->ndev;
struct nvm_id *id;
- struct nvm_id_group *grp;
struct attribute *attr;

if (!ndev)
return 0;

id = &ndev->identity;
- grp = &id->grp;
attr = &dattr->attr;

if (strcmp(attr->name, "version") == 0) {
@@ -771,41 +767,41 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
id->ppaf.pg_offset, id->ppaf.pg_len,
id->ppaf.sect_offset, id->ppaf.sect_len);
} else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->mtype);
} else if (strcmp(attr->name, "flash_media_type") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->fmtype);
} else if (strcmp(attr->name, "num_channels") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch);
} else if (strcmp(attr->name, "num_luns") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun);
} else if (strcmp(attr->name, "num_planes") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->fpg_sz);
} else if (strcmp(attr->name, "hw_sector_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs);
} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->sos);
} else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt);
} else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm);
} else if (strcmp(attr->name, "prog_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt);
} else if (strcmp(attr->name, "prog_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm);
} else if (strcmp(attr->name, "erase_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet);
} else if (strcmp(attr->name, "erase_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem);
} else if (strcmp(attr->name, "multiplane_modes") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mpos);
} else if (strcmp(attr->name, "media_capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap);
} else if (strcmp(attr->name, "max_phys_secs") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n",
ndev->ops->max_phys_sect);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 7f4b60abdf27..94b704a8d83d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -154,9 +154,29 @@ struct nvm_id_lp_tbl {
struct nvm_id_lp_mlc mlc;
};

-struct nvm_id_group {
- u8 mtype;
- u8 fmtype;
+struct nvm_addr_format {
+ u8 ch_offset;
+ u8 ch_len;
+ u8 lun_offset;
+ u8 lun_len;
+ u8 pln_offset;
+ u8 pln_len;
+ u8 blk_offset;
+ u8 blk_len;
+ u8 pg_offset;
+ u8 pg_len;
+ u8 sect_offset;
+ u8 sect_len;
+};
+
+struct nvm_id {
+ u8 ver_id;
+ u8 vmnt;
+ u32 cap;
+ u32 dom;
+
+ struct nvm_addr_format ppaf;
+
u8 num_ch;
u8 num_lun;
u16 num_chk;
@@ -180,33 +200,12 @@ struct nvm_id_group {
u16 cpar;

/* 1.2 compatibility */
+ u8 mtype;
+ u8 fmtype;
+
u8 num_pln;
u16 num_pg;
u16 fpg_sz;
-};
-
-struct nvm_addr_format {
- u8 ch_offset;
- u8 ch_len;
- u8 lun_offset;
- u8 lun_len;
- u8 pln_offset;
- u8 pln_len;
- u8 blk_offset;
- u8 blk_len;
- u8 pg_offset;
- u8 pg_len;
- u8 sect_offset;
- u8 sect_len;
-};
-
-struct nvm_id {
- u8 ver_id;
- u8 vmnt;
- u32 cap;
- u32 dom;
- struct nvm_addr_format ppaf;
- struct nvm_id_group grp;
} __packed;

struct nvm_target {
--
2.11.0


2018-02-05 18:05:42

by Randy Dunlap

Subject: Re: [PATCH 3/4] lightnvm: add 2.0 geometry identification

On 02/05/2018 04:15 AM, Matias Bjørling wrote:
> Implement the geometry data structures for 2.0 and enable a drive
> to be identified as one, including exposing the appropriate 2.0
> sysfs entries.
>
> Signed-off-by: Matias Bjørling <[email protected]>
> ---
> drivers/lightnvm/core.c | 2 +-
> drivers/nvme/host/lightnvm.c | 334 +++++++++++++++++++++++++++++++++++++------
> include/linux/lightnvm.h | 11 +-
> 3 files changed, 295 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index c72863b36439..250e74dfa120 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -934,7 +934,7 @@ static int nvm_init(struct nvm_dev *dev)
> pr_debug("nvm: ver:%x nvm_vendor:%x\n",
> dev->identity.ver_id, dev->identity.vmnt);
>
> - if (dev->identity.ver_id != 1) {
> + if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) {
> pr_err("nvm: device not supported by kernel.");
> goto err;
> }

Hi,
The pr_err() above could be a bit more informative to the user. E.g.,
pr_err("nvm: device ver_id %d not supported by kernel.",
dev->identity.ver_id);

BTW, isn't that line missing a '\n'?
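
Combining the two, that would end up as something like (untested):

	pr_err("nvm: device ver_id %d not supported by kernel.\n",
	       dev->identity.ver_id);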

--
~Randy

2018-02-06 09:44:29

by Matias Bjørling

Subject: Re: [PATCH 3/4] lightnvm: add 2.0 geometry identification

On 02/05/2018 07:04 PM, Randy Dunlap wrote:
> On 02/05/2018 04:15 AM, Matias Bjørling wrote:
>> Implement the geometry data structures for 2.0 and enable a drive
>> to be identified as one, including exposing the appropriate 2.0
>> sysfs entries.
>>
>> Signed-off-by: Matias Bjørling <[email protected]>
>> ---
>> drivers/lightnvm/core.c | 2 +-
>> drivers/nvme/host/lightnvm.c | 334 +++++++++++++++++++++++++++++++++++++------
>> include/linux/lightnvm.h | 11 +-
>> 3 files changed, 295 insertions(+), 52 deletions(-)
>>
>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>> index c72863b36439..250e74dfa120 100644
>> --- a/drivers/lightnvm/core.c
>> +++ b/drivers/lightnvm/core.c
>> @@ -934,7 +934,7 @@ static int nvm_init(struct nvm_dev *dev)
>> pr_debug("nvm: ver:%x nvm_vendor:%x\n",
>> dev->identity.ver_id, dev->identity.vmnt);
>>
>> - if (dev->identity.ver_id != 1) {
>> + if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) {
>> pr_err("nvm: device not supported by kernel.");
>> goto err;
>> }
>
> Hi,
> The pr_err() above could be a bit more informative to the user. E.g.,
> pr_err("nvm: device ver_id %d not supported by kernel.",
> dev->identity.ver_id);
>
> BTW, isn't that line missing a '\n'?
>

Good point! Thanks. I'll add it in.

2018-02-08 09:37:03

by Javier Gonzalez

Subject: Re: [PATCH 0/4] lightnvm: base 2.0 implementation

> On 5 Feb 2018, at 13.15, Matias Bjørling <[email protected]> wrote:
>
> Hi,
>
> A couple of patches for 2.0 support for the lightnvm subsystem. They
> form the basis for integrating 2.0 support.
>
> For the rest of the support, Javier has code that implements report
> chunk and sets up the LBA format data structure. He also has a bunch
> of patches that bring pblk up to speed.
>
> The first two patches are preparation for the 2.0 work. The third patch
> implements the 2.0 data structures, the geometry command, and exposes
> the sysfs attributes that come with the 2.0 specification. Note that
> the attributes between 1.2 and 2.0 are different, and it is expected
> that user-space shall use the version sysfs attribute to know which
> attributes will be available.
>
> The last patch implements support for using the nvme namespace logical
> block and metadata fields and syncs them with the internal lightnvm
> identify structures.
>
> -Matias
>
> Matias Bjørling (4):
> lightnvm: make 1.2 data structures explicit
> lightnvm: flatten nvm_id_group into nvm_id
> lightnvm: add 2.0 geometry identification
> nvme: lightnvm: add late setup of block size and metadata
>
> drivers/lightnvm/core.c | 27 ++-
> drivers/nvme/host/core.c | 2 +
> drivers/nvme/host/lightnvm.c | 508 ++++++++++++++++++++++++++++++++-----------
> drivers/nvme/host/nvme.h | 2 +
> include/linux/lightnvm.h | 64 +++---
> 5 files changed, 426 insertions(+), 177 deletions(-)
>
> --
> 2.11.0

Thanks for posting these. I have started rebasing my patches on top of
the new geometry - it is a bit different from how I implemented it, but
I'll take care of it.

I'll review as I go - some of the changes I have might make sense to
squash in your patches to keep a clean history...

I'll add a couple of patches abstracting the geometry so that at core.c
level we only work with a single geometry structure. This is the way it
is done in the early patches I pointed you to before. Then come patches
building bottom-up support for the new features in 2.0.
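
Roughly, the single structure looks something like the sketch below
(only a sketch; names and widths are not final, and ver_id/mw_cunits are
the only additions over the fields core.c already consumes):

struct nvm_geo {
	u8	ver_id;		/* device reported: 1 for 1.2, 2 for 2.0 */

	/* instance geometry */
	int	nr_chnls;	/* 2.0: groups */
	int	nr_luns;	/* 2.0: parallel units per group */
	int	nr_chks;	/* chunks per parallel unit */
	int	sec_per_chk;	/* 2.0: clba */

	/* sector and out-of-band sizes */
	int	sec_size;
	int	oob_size;

	/* write requirements */
	int	ws_min;
	int	ws_opt;
	int	mw_cunits;	/* 2.0 only */

	u32	mccap;
};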

Javier



2018-02-08 09:44:24

by Matias Bjørling

Subject: Re: [PATCH 0/4] lightnvm: base 2.0 implementation

On 02/08/2018 10:35 AM, Javier Gonzalez wrote:
>> On 5 Feb 2018, at 13.15, Matias Bjørling <[email protected]> wrote:
>>
>> Hi,
>>
>> A couple of patches for 2.0 support for the lightnvm subsystem. They
>> form the basis for integrating 2.0 support.
>>
>> For the rest of the support, Javier has code that implements report
>> chunk and sets up the LBA format data structure. He also has a bunch
>> of patches that bring pblk up to speed.
>>
>> The first two patches are preparation for the 2.0 work. The third patch
>> implements the 2.0 data structures, the geometry command, and exposes
>> the sysfs attributes that come with the 2.0 specification. Note that
>> the attributes between 1.2 and 2.0 are different, and it is expected
>> that user-space shall use the version sysfs attribute to know which
>> attributes will be available.
>>
>> The last patch implements support for using the nvme namespace logical
>> block and metadata fields and syncs them with the internal lightnvm
>> identify structures.
>>
>> -Matias
>>
>> Matias Bjørling (4):
>> lightnvm: make 1.2 data structures explicit
>> lightnvm: flatten nvm_id_group into nvm_id
>> lightnvm: add 2.0 geometry identification
>> nvme: lightnvm: add late setup of block size and metadata
>>
>> drivers/lightnvm/core.c | 27 ++-
>> drivers/nvme/host/core.c | 2 +
>> drivers/nvme/host/lightnvm.c | 508 ++++++++++++++++++++++++++++++++-----------
>> drivers/nvme/host/nvme.h | 2 +
>> include/linux/lightnvm.h | 64 +++---
>> 5 files changed, 426 insertions(+), 177 deletions(-)
>>
>> --
>> 2.11.0
>
> Thanks for posting these. I have started rebasing my patches on top of
> the new geometry - it is a bit different from how I implemented it, but
> I'll take care of it.
>
> I'll review as I go - some of the changes I have might make sense to
> squash in your patches to keep a clean history...
>

Thanks.

> I'll add a couple of patches abstracting the geometry so that at core.c
> level we only work with a single geometry structure. This is the way it
> is done in the early patches I pointed you to before. Then come patches
> building bottom-up support for the new features in 2.0.
>

Yep, I was expecting that. I skipped that part since it went into pblk
and you already had some patches for it.

> Javier
>


2018-02-08 13:36:11

by Javier Gonzalez

Subject: Re: [PATCH 3/4] lightnvm: add 2.0 geometry identification

> On 5 Feb 2018, at 13.15, Matias Bjørling <[email protected]> wrote:
>
> Implement the geometry data structures for 2.0 and enable a drive
> to be identified as one, including exposing the appropriate 2.0
> sysfs entries.
>
> Signed-off-by: Matias Bjørling <[email protected]>
> ---
> drivers/lightnvm/core.c | 2 +-
> drivers/nvme/host/lightnvm.c | 334 +++++++++++++++++++++++++++++++++++++------
> include/linux/lightnvm.h | 11 +-
> 3 files changed, 295 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index c72863b36439..250e74dfa120 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -934,7 +934,7 @@ static int nvm_init(struct nvm_dev *dev)
> pr_debug("nvm: ver:%x nvm_vendor:%x\n",
> dev->identity.ver_id, dev->identity.vmnt);
>
> - if (dev->identity.ver_id != 1) {
> + if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) {
> pr_err("nvm: device not supported by kernel.");
> goto err;
> }
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index 6412551ecc65..a9c010655ccc 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -184,6 +184,58 @@ struct nvme_nvm_bb_tbl {
> __u8 blk[0];
> };
>
> +struct nvme_nvm_id20_addrf {
> + __u8 grp_len;
> + __u8 pu_len;
> + __u8 chk_len;
> + __u8 lba_len;
> + __u8 resv[4];
> +};
> +
> +struct nvme_nvm_id20 {
> + __u8 mjr;
> + __u8 mnr;
> + __u8 resv[6];
> +
> + struct nvme_nvm_id20_addrf lbaf;
> +
> + __u32 mccap;
> + __u8 resv2[12];
> +
> + __u8 wit;
> + __u8 resv3[31];
> +
> + /* Geometry */
> + __u16 num_grp;
> + __u16 num_pu;
> + __u32 num_chk;
> + __u32 clba;
> + __u8 resv4[52];
> +
> + /* Write data requirements */
> + __u32 ws_min;
> + __u32 ws_opt;
> + __u32 mw_cunits;
> + __u32 maxoc;
> + __u32 maxocpu;
> + __u8 resv5[44];
> +
> + /* Performance related metrics */
> + __u32 trdt;
> + __u32 trdm;
> + __u32 twrt;
> + __u32 twrm;
> + __u32 tcrst;
> + __u32 tcrsm;
> + __u8 resv6[40];
> +
> + /* Reserved area */
> + __u8 resv7[2816];
> +
> + /* Vendor specific */
> + __u8 vs[1024];
> +};
>

All __u16, __u32 should be __le16, __le32
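
That is, the geometry and write data fields would read along these lines
(just sketching the fix, not a full patch; mccap and the timing fields
need the same treatment):

	/* Geometry */
	__le16			num_grp;
	__le16			num_pu;
	__le32			num_chk;
	__le32			clba;
	__u8			resv4[52];

	/* Write data requirements */
	__le32			ws_min;
	__le32			ws_opt;
	__le32			mw_cunits;
	__le32			maxoc;
	__le32			maxocpu;
	__u8			resv5[44];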

Javier



2018-03-23 11:01:27

by Matias Bjørling

Subject: Re: [PATCH 4/4] nvme: lightnvm: add late setup of block size and metadata

On 02/05/2018 01:15 PM, Matias Bjørling wrote:
> The nvme driver sets up the size of the nvme namespace in two steps.
> First it initializes the device with standard logical block and
> metadata sizes, and then sets the correct logical block and metadata
> size. Since the OCSSD 2.0 specification relies on the namespace to
> expose these sizes for correct initialization, let them be updated
> appropriately on the LightNVM side as well.
>
> Signed-off-by: Matias Bjørling <[email protected]>
> ---
> drivers/nvme/host/core.c | 2 ++
> drivers/nvme/host/lightnvm.c | 8 ++++++++
> drivers/nvme/host/nvme.h | 2 ++
> 3 files changed, 12 insertions(+)
>
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index f837d666cbd4..740ceb28067c 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -1379,6 +1379,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
> if (ns->noiob)
> nvme_set_chunk_size(ns);
> nvme_update_disk_info(disk, ns, id);
> + if (ns->ndev)
> + nvme_nvm_update_nvm_info(ns);
> #ifdef CONFIG_NVME_MULTIPATH
> if (ns->head->disk)
> nvme_update_disk_info(ns->head->disk, ns, id);
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index a9c010655ccc..8d4301854811 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -814,6 +814,14 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
> }
> }
>
> +void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
> +{
> + struct nvm_dev *ndev = ns->ndev;
> +
> + ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift;
> + ndev->identity.sos = ndev->geo.oob_size = ns->ms;
> +}
> +
> int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
> {
> struct request_queue *q = ns->queue;
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index ea1aa5283e8e..1ca08f4993ba 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -451,12 +451,14 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
> #endif /* CONFIG_NVME_MULTIPATH */
>
> #ifdef CONFIG_NVM
> +void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
> int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
> void nvme_nvm_unregister(struct nvme_ns *ns);
> int nvme_nvm_register_sysfs(struct nvme_ns *ns);
> void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
> int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
> #else
> +static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
> static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
> int node)
> {
>

Hi Keith,

When going through the patches for 4.17, I forgot to run this patch by
you. It is part of adding OCSSD 2.0 support to the kernel, and slides in
between a large refactoring and the 2.0 part. May I add your Reviewed-by
and let Jens pick it up after the nvme patches for 4.17 have gone up?

Thanks!

-Matias

2018-03-28 08:29:55

by Christoph Hellwig

Subject: Re: [PATCH 4/4] nvme: lightnvm: add late setup of block size and metadata

I really don't want more lightnvm cruft in the core. We'll need
a proper abstraction.

On Fri, Mar 23, 2018 at 12:00:08PM +0100, Matias Bjørling wrote:
> On 02/05/2018 01:15 PM, Matias Bjørling wrote:
> > The nvme driver sets up the size of the nvme namespace in two steps.
> > First it initializes the device with standard logical block and
> > metadata sizes, and then sets the correct logical block and metadata
> > size. Since the OCSSD 2.0 specification relies on the namespace to
> > expose these sizes for correct initialization, let them be updated
> > appropriately on the LightNVM side as well.
> >
> > Signed-off-by: Matias Bjørling <[email protected]>
> > ---
> > drivers/nvme/host/core.c | 2 ++
> > drivers/nvme/host/lightnvm.c | 8 ++++++++
> > drivers/nvme/host/nvme.h | 2 ++
> > 3 files changed, 12 insertions(+)
> >
> > diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> > index f837d666cbd4..740ceb28067c 100644
> > --- a/drivers/nvme/host/core.c
> > +++ b/drivers/nvme/host/core.c
> > @@ -1379,6 +1379,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
> > if (ns->noiob)
> > nvme_set_chunk_size(ns);
> > nvme_update_disk_info(disk, ns, id);
> > + if (ns->ndev)
> > + nvme_nvm_update_nvm_info(ns);
> > #ifdef CONFIG_NVME_MULTIPATH
> > if (ns->head->disk)
> > nvme_update_disk_info(ns->head->disk, ns, id);
> > diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> > index a9c010655ccc..8d4301854811 100644
> > --- a/drivers/nvme/host/lightnvm.c
> > +++ b/drivers/nvme/host/lightnvm.c
> > @@ -814,6 +814,14 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
> > }
> > }
> > +void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
> > +{
> > + struct nvm_dev *ndev = ns->ndev;
> > +
> > + ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift;
> > + ndev->identity.sos = ndev->geo.oob_size = ns->ms;
> > +}
> > +
> > int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
> > {
> > struct request_queue *q = ns->queue;
> > diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> > index ea1aa5283e8e..1ca08f4993ba 100644
> > --- a/drivers/nvme/host/nvme.h
> > +++ b/drivers/nvme/host/nvme.h
> > @@ -451,12 +451,14 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
> > #endif /* CONFIG_NVME_MULTIPATH */
> > #ifdef CONFIG_NVM
> > +void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
> > int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
> > void nvme_nvm_unregister(struct nvme_ns *ns);
> > int nvme_nvm_register_sysfs(struct nvme_ns *ns);
> > void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
> > int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
> > #else
> > +static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
> > static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
> > int node)
> > {
> >
>
> Hi Keith,
>
> When going through the patches for 4.17, I forgot to run this patch by you.
> It is part of adding OCSSD 2.0 support to the kernel, and slides in between a
> large refactoring and the 2.0 part. May I add your Reviewed-by and let Jens
> pick it up after the nvme patches for 4.17 have gone up?
>
> Thanks!
>
> -Matias
>
> _______________________________________________
> Linux-nvme mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-nvme
---end quoted text---

2018-03-28 12:38:03

by Matias Bjørling

Subject: Re: [PATCH 4/4] nvme: lightnvm: add late setup of block size and metadata

On 28/03/2018 10.28, Christoph Hellwig wrote:
> I really don't want more lightnvm cruft in the core. We'll need
> a proper abstraction.
>

I agree, we should get that moving, and make a proper abstraction for
it. Also with respect to how an SMR interface in general is integrated
into NVMe.

The patch is necessary because the LBA format is retrieved from the nvme
drive through the revalidate event. It might not be available the first time
around, and we have to hook in when the drive is revalidated the second
time.

> On Fri, Mar 23, 2018 at 12:00:08PM +0100, Matias Bjørling wrote:
>> On 02/05/2018 01:15 PM, Matias Bjørling wrote:
>>> The nvme driver sets up the size of the nvme namespace in two steps.
>>> First it initializes the device with standard logical block and
>>> metadata sizes, and then sets the correct logical block and metadata
>>> size. Since the OCSSD 2.0 specification relies on the namespace to
>>> expose these sizes for correct initialization, let them be updated
>>> appropriately on the LightNVM side as well.
>>>
>>> Signed-off-by: Matias Bjørling <[email protected]>
>>> ---
>>> drivers/nvme/host/core.c | 2 ++
>>> drivers/nvme/host/lightnvm.c | 8 ++++++++
>>> drivers/nvme/host/nvme.h | 2 ++
>>> 3 files changed, 12 insertions(+)
>>>
>>> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
>>> index f837d666cbd4..740ceb28067c 100644
>>> --- a/drivers/nvme/host/core.c
>>> +++ b/drivers/nvme/host/core.c
>>> @@ -1379,6 +1379,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
>>> if (ns->noiob)
>>> nvme_set_chunk_size(ns);
>>> nvme_update_disk_info(disk, ns, id);
>>> + if (ns->ndev)
>>> + nvme_nvm_update_nvm_info(ns);
>>> #ifdef CONFIG_NVME_MULTIPATH
>>> if (ns->head->disk)
>>> nvme_update_disk_info(ns->head->disk, ns, id);
>>> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
>>> index a9c010655ccc..8d4301854811 100644
>>> --- a/drivers/nvme/host/lightnvm.c
>>> +++ b/drivers/nvme/host/lightnvm.c
>>> @@ -814,6 +814,14 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
>>> }
>>> }
>>> +void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
>>> +{
>>> + struct nvm_dev *ndev = ns->ndev;
>>> +
>>> + ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift;
>>> + ndev->identity.sos = ndev->geo.oob_size = ns->ms;
>>> +}
>>> +
>>> int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
>>> {
>>> struct request_queue *q = ns->queue;
>>> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
>>> index ea1aa5283e8e..1ca08f4993ba 100644
>>> --- a/drivers/nvme/host/nvme.h
>>> +++ b/drivers/nvme/host/nvme.h
>>> @@ -451,12 +451,14 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
>>> #endif /* CONFIG_NVME_MULTIPATH */
>>> #ifdef CONFIG_NVM
>>> +void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
>>> int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
>>> void nvme_nvm_unregister(struct nvme_ns *ns);
>>> int nvme_nvm_register_sysfs(struct nvme_ns *ns);
>>> void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
>>> int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
>>> #else
>>> +static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
>>> static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
>>> int node)
>>> {
>>>
>>
>> Hi Keith,
>>
>> When going through the patches for 4.17, I forgot to run this patch by you.
>> It is part of adding OCSSD 2.0 support to the kernel, and slides in between a
>> large refactoring and the 2.0 part. May I add your Reviewed-by and let Jens
>> pick it up after the nvme patches for 4.17 have gone up?
>>
>> Thanks!
>>
>> -Matias
>>
>> _______________________________________________
>> Linux-nvme mailing list
>> [email protected]
>> http://lists.infradead.org/mailman/listinfo/linux-nvme
> ---end quoted text---
>