Make the nvme code more uniform by initializing struct members at
declaration time. This change is done both in drivers/nvme/host/ and
drivers/nvme/target/.
This was the design pattern used in nvme before workarounds for a gcc
bug were introduced in commit e44ac588cd61 ("drivers/block/nvme-core.c:
fix build with gcc-4.4.4").
Since the minimum gcc version needed to build the kernel is now gcc 4.8.0,
which does not have this bug, revert to the previous design pattern,
which matches how the rest of the nvme code handles initialization
of struct members (excluding the cases where anonymous unions were
involved).
If, for some reason, we want to allow builds with gcc < 4.6.0
even though the minimum gcc version is now 4.8.0,
there is another less intrusive workaround where you add an extra pair of
curly braces, see e.g. commit 6cc65be4f6f2 ("locking/qspinlock: Fix build
for anonymous union in older GCC compilers").
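To make that alternative concrete, here is a rough sketch on a made-up
struct (not taken from the nvme code) whose shape mirrors the anonymous
union in struct nvme_command; the member names and values are purely
illustrative, and whether a given pre-4.6 compiler accepts the second
form would of course have to be verified against that compiler:

struct example_cmd {
	union {
		struct {
			__u8	opcode;
			__u8	cns;
		} identify;
		struct {
			__u8	opcode;
			__le16	numdl;
		} get_log_page;
	};
};

/* Designated initializers that name members of the anonymous union
 * directly; this is the style that this series restores: */
struct example_cmd a = {
	.identify.opcode = 0x06,
	.identify.cns = 0x01,
};

/* The extra-brace workaround: the outer pair of braces initializes the
 * anonymous union positionally, and the designators inside it only name
 * direct members of that union: */
struct example_cmd b = {
	{ .identify = { .opcode = 0x06, .cns = 0x01 } },
};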
Changes since v1:
- Fixed RDMA build error.
Niklas Cassel (2):
nvme: remove workarounds for gcc bug wrt unnamed fields in
initializers
nvmet: remove workarounds for gcc bug wrt unnamed fields in
initializers
drivers/nvme/host/core.c | 59 ++++++++++++++++++------------------
drivers/nvme/host/lightnvm.c | 32 +++++++++----------
drivers/nvme/host/rdma.c | 28 ++++++++---------
drivers/nvme/target/rdma.c | 23 +++++++-------
4 files changed, 71 insertions(+), 71 deletions(-)
--
2.26.2
Workarounds for gcc issues with initializers and anon unions were first
introduced in commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build
with gcc-4.4.4").
The gcc bug in question has been fixed since gcc 4.6.0:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=10676
The minimum gcc version for building the kernel has been 4.6.0 since
commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6"),
and has since been updated to gcc 4.8.0 in
commit 5429ef62bcf3 ("compiler/gcc: Raise minimum GCC version for
kernel builds to 4.8").
For that reason, it should now be safe to remove these workarounds
and make the code look like it did before
commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build with gcc-4.4.4")
was introduced.
Signed-off-by: Niklas Cassel <[email protected]>
---
drivers/nvme/host/core.c | 59 ++++++++++++++++++------------------
drivers/nvme/host/lightnvm.c | 32 +++++++++----------
drivers/nvme/host/rdma.c | 28 ++++++++---------
3 files changed, 59 insertions(+), 60 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9491dbcfe81a..99059340d723 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1038,13 +1038,12 @@ static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_CTRL,
+ };
int error;
- /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_CTRL;
-
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
return -ENOMEM;
@@ -1096,16 +1095,16 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_NS_DESC_LIST,
+ };
int status;
void *data;
int pos;
int len;
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
- c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
-
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1143,11 +1142,12 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(nsid),
+ };
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
- c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
NVME_IDENTIFY_DATA_SIZE);
}
@@ -1155,14 +1155,13 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
static int nvme_identify_ns(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_id_ns **id)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_NS,
+ };
int error;
- /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
- c.identify.cns = NVME_ID_CNS_NS;
-
*id = kmalloc(sizeof(**id), GFP_KERNEL);
if (!*id)
return -ENOMEM;
@@ -2815,17 +2814,17 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset)
{
- struct nvme_command c = { };
u32 dwlen = nvme_bytes_to_numd(size);
-
- c.get_log_page.opcode = nvme_admin_get_log_page;
- c.get_log_page.nsid = cpu_to_le32(nsid);
- c.get_log_page.lid = log_page;
- c.get_log_page.lsp = lsp;
- c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
- c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
- c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
- c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ struct nvme_command c = {
+ .get_log_page.opcode = nvme_admin_get_log_page,
+ .get_log_page.nsid = cpu_to_le32(nsid),
+ .get_log_page.lid = log_page,
+ .get_log_page.lsp = lsp,
+ .get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)),
+ .get_log_page.numdu = cpu_to_le16(dwlen >> 16),
+ .get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)),
+ .get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)),
+ };
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 69608755d415..7c44eca78f0d 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -432,12 +432,12 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_nvm_id12 *id;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .identity.opcode = nvme_nvm_admin_identity,
+ .identity.nsid = cpu_to_le32(ns->head->ns_id),
+ };
int ret;
- c.identity.opcode = nvme_nvm_admin_identity;
- c.identity.nsid = cpu_to_le32(ns->head->ns_id);
-
id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
if (!id)
return -ENOMEM;
@@ -479,16 +479,16 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvm_geo *geo = &nvmdev->geo;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .get_bb.opcode = nvme_nvm_admin_get_bb_tbl,
+ .get_bb.nsid = cpu_to_le32(ns->head->ns_id),
+ .get_bb.spba = cpu_to_le64(ppa.ppa),
+ };
struct nvme_nvm_bb_tbl *bb_tbl;
int nr_blks = geo->num_chk * geo->num_pln;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
- c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
- c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.get_bb.spba = cpu_to_le64(ppa.ppa);
-
bb_tbl = kzalloc(tblsz, GFP_KERNEL);
if (!bb_tbl)
return -ENOMEM;
@@ -532,15 +532,15 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
int nr_ppas, int type)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .set_bb.opcode = nvme_nvm_admin_set_bb_tbl,
+ .set_bb.nsid = cpu_to_le32(ns->head->ns_id),
+ .set_bb.spba = cpu_to_le64(ppas->ppa),
+ .set_bb.nlb = cpu_to_le16(nr_ppas - 1),
+ .set_bb.value = type,
+ };
int ret = 0;
- c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
- c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.set_bb.spba = cpu_to_le64(ppas->ppa);
- c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
- c.set_bb.value = type;
-
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f8f856dc0c67..bdf2ecaddbb6 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1804,22 +1804,22 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- struct rdma_conn_param param = { };
- struct nvme_rdma_cm_req priv = { };
+ struct nvme_rdma_cm_req priv = {
+ .recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0),
+ .qid = cpu_to_le16(nvme_rdma_queue_idx(queue)),
+ };
+ struct rdma_conn_param param = {
+ .qp_num = queue->qp->qp_num,
+ .flow_control = 1,
+ .responder_resources = queue->device->dev->attrs.max_qp_rd_atom,
+ /* maximum retry count */
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &priv,
+ .private_data_len = sizeof(priv),
+ };
int ret;
- param.qp_num = queue->qp->qp_num;
- param.flow_control = 1;
-
- param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
- /* maximum retry count */
- param.retry_count = 7;
- param.rnr_retry_count = 7;
- param.private_data = &priv;
- param.private_data_len = sizeof(priv);
-
- priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
- priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
/*
* set the admin queue depth to the minimum size
* specified by the Fabrics standard.
--
2.26.2
Workarounds for gcc issues with initializers and anon unions were first
introduced in commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build
with gcc-4.4.4").
The gcc bug in question has been fixed since gcc 4.6.0:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=10676
The minimum gcc version for building the kernel has been 4.6.0 since
commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6"),
and has since been updated to gcc 4.8.0 in
commit 5429ef62bcf3 ("compiler/gcc: Raise minimum GCC version for
kernel builds to 4.8").
For that reason, it should now be safe to remove these workarounds
and make the code look like it did before
commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build with gcc-4.4.4")
was introduced.
Signed-off-by: Niklas Cassel <[email protected]>
---
drivers/nvme/target/rdma.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6731e0349480..238bc55de561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1535,19 +1535,20 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue,
struct rdma_conn_param *p)
{
- struct rdma_conn_param param = { };
- struct nvme_rdma_cm_rep priv = { };
+ struct nvme_rdma_cm_rep priv = {
+ .recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0),
+ .crqsize = cpu_to_le16(queue->recv_queue_size),
+ };
+ struct rdma_conn_param param = {
+ .rnr_retry_count = 7,
+ .flow_control = 1,
+ .initiator_depth = min_t(u8, p->initiator_depth,
+ queue->dev->device->attrs.max_qp_init_rd_atom),
+ .private_data = &priv,
+ .private_data_len = sizeof(priv),
+ };
int ret = -ENOMEM;
- param.rnr_retry_count = 7;
- param.flow_control = 1;
- param.initiator_depth = min_t(u8, p->initiator_depth,
- queue->dev->device->attrs.max_qp_init_rd_atom);
- param.private_data = &priv;
- param.private_data_len = sizeof(priv);
- priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
- priv.crqsize = cpu_to_le16(queue->recv_queue_size);
-
ret = rdma_accept(cm_id, &param);
if (ret)
pr_err("rdma_accept failed (error code = %d)\n", ret);
--
2.26.2
This looks good to me, but I'd rather wait a few releases to
avoid too much backporting pain.
On Wed, Jun 24, 2020 at 06:44:41PM +0200, Christoph Hellwig wrote:
> This looks good to me, but I'd rather wait a few releases to
> avoid too much backporting pain.
Chaitanya made me realize that about half of the nvme functions
declare "struct nvme_command c" on the stack and then memset it,
and half of the nvme functions use an initializer.
IMHO, using an initializer is more clear.
memset has to be used if the function needs to reset an
existing struct, but in none of the functions that I've seen,
are we given an existing nvme_command that we need to reset.
All the functions that I've seen declare a new nvme_command
on the stack (so an initializer makes more sense).
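For illustration only (reusing the identify command from the patch, not
copied from any one function), the two alternatives for building the
same command look roughly like this:

	/* declare on the stack, then memset and assign: */
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	/* initialize the members at declaration time: */
	struct nvme_command c = {
		.identify.opcode = nvme_admin_identify,
		.identify.cns = NVME_ID_CNS_CTRL,
	};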
What do you think about me unifying this later on?
Kind regards,
Niklas
On Wed, Jun 24, 2020 at 04:57:48PM +0000, Niklas Cassel wrote:
> On Wed, Jun 24, 2020 at 06:44:41PM +0200, Christoph Hellwig wrote:
> > This looks good to me, but I'd rather wait a few releases to
> > avoid too much backporting pain.
>
> Chaitanya made me realize that about half of the nvme functions
> declare "struct nvme_command c" on the stack and then memset it,
> and half of the nvme functions use an initializer.
>
> IMHO, using an initializer is more clear.
>
> memset has to be used if the function needs to reset an
> existing struct, but in none of the functions that I've seen,
> are we given an existing nvme_command that we need to reset.
> All the functions that I've seen declare a new nvme_command
> on the stack (so an initializer makes more sense).
>
> What do you think about me unifying this later on?
I like the initializers a lot. But as I said I'd rather wait a
bit for now.
On Wed, Jun 24, 2020 at 07:02:11PM +0200, Christoph Hellwig wrote:
> On Wed, Jun 24, 2020 at 04:57:48PM +0000, Niklas Cassel wrote:
> > On Wed, Jun 24, 2020 at 06:44:41PM +0200, Christoph Hellwig wrote:
> > > This looks good to me, but I'd rather wait a few releases to
> > > avoid too much backporting pain.
> >
> > Chaitanya made me realize that about half of the nvme functions
> > declare "struct nvme_command c" on the stack and then memset it,
> > and half of the nvme functions use an initializer.
> >
> > IMHO, using an initializer is more clear.
> >
> > memset has to be used if the function needs to reset an
> > existing struct, but in none of the functions that I've seen,
> > are we given an existing nvme_command that we need to reset.
> > All the functions that I've seen declare a new nvme_command
> > on the stack (so an initializer makes more sense).
> >
> > What do you think about me unifying this later on?
>
> I like the initializers a lot. But as I said I'd rather wait a
> bit for now.
Just to be clear:
Even with these patches, about half of the nvme functions are using
memset rather than initializers.
But sure, I'll wait a couple of releases, and then rebase this,
and additionally convert the "struct nvme_command c" memset users
to use initializers.
Kind regards,
Niklas
Christoph, Sagi and Keith,
On 6/24/20 9:44 AM, Christoph Hellwig wrote:
> This looks good to me, but I'd rather wait a few releases to
> avoid too much backporting pain.
>
Here is a summary; for a longer explanation please have a look at the
end [1] :-
Pros:
1. Code looks uniform and follows strict policy.
Cons:
1. Adds a tab + more characters [1], which can lead to line breaks; those can
be avoided by not following the declare-init pattern, giving fewer bugs and
no pressure to fit the initializer in ~72 characters, given that we do have
some long names and who knows what is in the future.
2. Issues with older compiler versions can lead to adding additional braces,
which does not look good.
3. Writing new code becomes inflexible, and the pressure to fit the
initializer will not allow users to use meaningful names in the nested
structures and anon unions.
4. Future patches will be needed for backward compatibility.
Also, the code is perfectly readable as it is, so why change?
If everyone is okay with the above cons, I'm fine with adding this.
Regards,
Chaitanya
[1] Explanation :-
I'm not against unifying the code. This will enforce struct
initialization to be done at the time of declaration and is inflexible
given that we have different transports and meaningful structure names.
Also, no one knows how many new structures will be coming, since the
protocol still has room for improvement.
Consider the following :-
e.g. 1
static void nvme_xxx_func()
{
	struct nvme_XXX_YYY_ZZZ abcde = {
		.member1 = some_long_initializer_calculation(X),
		.member2 = AAAAAAAAAAAAA + BBBBBBBBB + CCCCCC + DDDD +
			   EEEEE,
		.member3 = AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,
	};
}
e.g. 2
From code :-
+ struct rdma_conn_param param = {
+ .rnr_retry_count = 7,
+ .flow_control = 1,
+ .initiator_depth = min_t(u8, p->initiator_depth,
+ here ->>>queue->dev->device->attrs.max_qp_init_rd_atom),
+ .private_data = &priv,
+ .private_data_len = sizeof(priv),
+ };
In the above cases (e.g. 1, e.g. 2) we lose 8 characters = 1 tab for every
declaration-initialization. Now if we have a member that needs to be
initialized with a complex calculation, it comes down to the next line and
we again lose 8 characters of tab + (number of characters in the name) of
the member (member2 in nvme_xxx_func()), and the whole thing looks ugly. In
contrast, if we do it outside of the declaration we still get 8 more
characters before we reach the line limit. With the 80 character limit we
should avoid line breaks and tabs as and when possible; this policy goes
against that.
On Wed, Jun 24, 2020 at 10:40:21PM +0000, Chaitanya Kulkarni wrote:
> Christoph, Sagi and Keith,
>
> On 6/24/20 9:44 AM, Christoph Hellwig wrote:
> > This looks good to me, but I'd rather wait a few releases to
> > avoid too much backporting pain.
> >
>
> Here is a summary; for a longer explanation please have a look at the
> end [1] :-
>
> Pros:
> 1. Code looks uniform and follows strict policy.
>
> Cons:
> 1. Adds a tab + more characters [1], which can lead to line breaks; those can
> be avoided by not following the declare-init pattern, giving fewer bugs and
> no pressure to fit the initializer in ~72 characters, given that we do have
> some long names and who knows what is in the future.
[BEGIN being silly.. sorry guys, I couldn't resist.. :)]
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/scripts/checkpatch.pl?id=bdc48fa11e46f867ea4d75fa59ee87a7f48be144
Isn't 100 the new 80? ;)
[END being silly]