2020-06-18 20:34:45

by Niklas Cassel

[permalink] [raw]
Subject: [PATCH 1/2] nvme: remove workarounds for gcc bug wrt unnamed fields in initializers

Workarounds for gcc issues with initializers and anon unions were first
introduced in commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build
with gcc-4.4.4").

The gcc bug in question has been fixed since gcc 4.6.0:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=10676

The minimum gcc version for building the kernel has been 4.6.0 since
commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6"),
and has since been updated to gcc 4.8.0 in
commit 5429ef62bcf3 ("compiler/gcc: Raise minimum GCC version for
kernel builds to 4.8").

For that reason, it should now be safe to remove these workarounds
and make the code look like it did before
commit e44ac588cd61 ("drivers/block/nvme-core.c: fix build with gcc-4.4.4")
was introduced.

Signed-off-by: Niklas Cassel <[email protected]>
---
If, for some reason, we want to allow builds with gcc < 4.6.0
even though the minimum gcc version is now 4.8.0,
there is another less intrusive workaround where you add an extra pair of
curly braces, see e.g. commit 6cc65be4f6f2 ("locking/qspinlock: Fix build
for anonymous union in older GCC compilers").

drivers/nvme/host/core.c | 59 ++++++++++++++++++------------------
drivers/nvme/host/lightnvm.c | 32 +++++++++----------
drivers/nvme/host/rdma.c | 28 ++++++++---------
3 files changed, 59 insertions(+), 60 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9491dbcfe81a..99059340d723 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1038,13 +1038,12 @@ static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_CTRL,
+ };
int error;

- /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_CTRL;
-
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
return -ENOMEM;
@@ -1096,16 +1095,16 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_NS_DESC_LIST,
+ };
int status;
void *data;
int pos;
int len;

- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
- c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
-
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1143,11 +1142,12 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(nsid),
+ };

- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
- c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
NVME_IDENTIFY_DATA_SIZE);
}
@@ -1155,14 +1155,13 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
static int nvme_identify_ns(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_id_ns **id)
{
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_NS,
+ };
int error;

- /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
- c.identify.cns = NVME_ID_CNS_NS;
-
*id = kmalloc(sizeof(**id), GFP_KERNEL);
if (!*id)
return -ENOMEM;
@@ -2815,17 +2814,17 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset)
{
- struct nvme_command c = { };
u32 dwlen = nvme_bytes_to_numd(size);
-
- c.get_log_page.opcode = nvme_admin_get_log_page;
- c.get_log_page.nsid = cpu_to_le32(nsid);
- c.get_log_page.lid = log_page;
- c.get_log_page.lsp = lsp;
- c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
- c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
- c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
- c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ struct nvme_command c = {
+ .get_log_page.opcode = nvme_admin_get_log_page,
+ .get_log_page.nsid = cpu_to_le32(nsid),
+ .get_log_page.lid = log_page,
+ .get_log_page.lsp = lsp,
+ .get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)),
+ .get_log_page.numdu = cpu_to_le16(dwlen >> 16),
+ .get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)),
+ .get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)),
+ };

return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 69608755d415..7c44eca78f0d 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -432,12 +432,12 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_nvm_id12 *id;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .identity.opcode = nvme_nvm_admin_identity,
+ .identity.nsid = cpu_to_le32(ns->head->ns_id),
+ };
int ret;

- c.identity.opcode = nvme_nvm_admin_identity;
- c.identity.nsid = cpu_to_le32(ns->head->ns_id);
-
id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
if (!id)
return -ENOMEM;
@@ -479,16 +479,16 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvm_geo *geo = &nvmdev->geo;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .get_bb.opcode = nvme_nvm_admin_get_bb_tbl,
+ .get_bb.nsid = cpu_to_le32(ns->head->ns_id),
+ .get_bb.spba = cpu_to_le64(ppa.ppa),
+ };
struct nvme_nvm_bb_tbl *bb_tbl;
int nr_blks = geo->num_chk * geo->num_pln;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;

- c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
- c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.get_bb.spba = cpu_to_le64(ppa.ppa);
-
bb_tbl = kzalloc(tblsz, GFP_KERNEL);
if (!bb_tbl)
return -ENOMEM;
@@ -532,15 +532,15 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
int nr_ppas, int type)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
+ struct nvme_nvm_command c = {
+ .set_bb.opcode = nvme_nvm_admin_set_bb_tbl,
+ .set_bb.nsid = cpu_to_le32(ns->head->ns_id),
+ .set_bb.spba = cpu_to_le64(ppas->ppa),
+ .set_bb.nlb = cpu_to_le16(nr_ppas - 1),
+ .set_bb.value = type,
+ };
int ret = 0;

- c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
- c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.set_bb.spba = cpu_to_le64(ppas->ppa);
- c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
- c.set_bb.value = type;
-
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f8f856dc0c67..08c8728b3b29 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1804,22 +1804,22 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- struct rdma_conn_param param = { };
- struct nvme_rdma_cm_req priv = { };
+ struct nvme_rdma_cm_req priv = {
+ .recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0),
+ .qid = cpu_to_le16(nvme_rdma_queue_idx(queue)),
+ };
+ /* priv must be declared before param: param's initializer takes &priv */
+ struct rdma_conn_param param = {
+ .qp_num = queue->qp->qp_num,
+ .flow_control = 1,
+ .responder_resources = queue->device->dev->attrs.max_qp_rd_atom,
+ /* maximum retry count */
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &priv,
+ .private_data_len = sizeof(priv),
+ };
int ret;

- param.qp_num = queue->qp->qp_num;
- param.flow_control = 1;
-
- param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
- /* maximum retry count */
- param.retry_count = 7;
- param.rnr_retry_count = 7;
- param.private_data = &priv;
- param.private_data_len = sizeof(priv);
-
- priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
- priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
/*
* set the admin queue depth to the minimum size
* specified by the Fabrics standard.
--
2.26.2


2020-06-19 03:22:58

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 1/2] nvme: remove workarounds for gcc bug wrt unnamed fields in initializers

Hi Niklas,

I love your patch! Yet something to improve:

[auto build test ERROR on block/for-next]
[also build test ERROR on linus/master v5.8-rc1 next-20200618]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/Niklas-Cassel/nvme-remove-workarounds-for-gcc-bug-wrt-unnamed-fields-in-initializers/20200618-223525
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: x86_64-allyesconfig (attached as .config)
compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project 487ca07fcc75d52755c9fe2ee05bcb3b6eeeec44)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>

All errors (new ones prefixed by >>):

>> drivers/nvme/host/rdma.c:1814:20: error: use of undeclared identifier 'priv'
.private_data = &priv,
^
drivers/nvme/host/rdma.c:1815:30: error: use of undeclared identifier 'priv'
.private_data_len = sizeof(priv),
^
2 errors generated.

vim +/priv +1814 drivers/nvme/host/rdma.c

1803
1804 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1805 {
1806 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1807 struct rdma_conn_param param = {
1808 .qp_num = queue->qp->qp_num,
1809 .flow_control = 1,
1810 .responder_resources = queue->device->dev->attrs.max_qp_rd_atom,
1811 /* maximum retry count */
1812 .retry_count = 7,
1813 .rnr_retry_count = 7,
> 1814 .private_data = &priv,
1815 .private_data_len = sizeof(priv),
1816 };
1817 struct nvme_rdma_cm_req priv = {
1818 .recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0),
1819 .qid = cpu_to_le16(nvme_rdma_queue_idx(queue)),
1820 };
1821 int ret;
1822
1823 /*
1824 * set the admin queue depth to the minimum size
1825 * specified by the Fabrics standard.
1826 */
1827 if (priv.qid == 0) {
1828 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
1829 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
1830 } else {
1831 /*
1832 * current interpretation of the fabrics spec
1833 * is at minimum you make hrqsize sqsize+1, or a
1834 * 1's based representation of sqsize.
1835 */
1836 priv.hrqsize = cpu_to_le16(queue->queue_size);
1837 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1838 }
1839
1840 ret = rdma_connect(queue->cm_id, &param);
1841 if (ret) {
1842 dev_err(ctrl->ctrl.device,
1843 "rdma_connect failed (%d).\n", ret);
1844 goto out_destroy_queue_ib;
1845 }
1846
1847 return 0;
1848
1849 out_destroy_queue_ib:
1850 nvme_rdma_destroy_queue_ib(queue);
1851 return ret;
1852 }
1853

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]


Attachments:
(No filename) (3.48 kB)
.config.gz (73.53 kB)
Download all attachments