2023-07-26 20:30:00

by sharmaajay

[permalink] [raw]
Subject: [Patch v3 0/4] RDMA/mana_ib Read Capabilities

From: Ajay Sharma <[email protected]>

This patch series introduces cleanup and resource control
changes. The mana and mana_ib devices are used in common
code paths, so a consistent naming convention is introduced.
An adapter object is added as a common container for VF
resources, providing a single point for releasing objects
and for querying the management software to prevent resource
overflow. The series also introduces an async channel that
the management software uses to notify clients of errors
and informational events.

Ajay Sharma (4):
RDMA/mana_ib : Rename all mana_ib_dev type variables to mib_dev
RDMA/mana_ib : Register Mana IB device with Management SW
RDMA/mana_ib : Create adapter and Add error eq
RDMA/mana_ib : Query adapter capabilities

drivers/infiniband/hw/mana/cq.c | 12 +-
drivers/infiniband/hw/mana/device.c | 72 +++--
drivers/infiniband/hw/mana/main.c | 282 +++++++++++++-----
drivers/infiniband/hw/mana/mana_ib.h | 96 +++++-
drivers/infiniband/hw/mana/mr.c | 42 ++-
drivers/infiniband/hw/mana/qp.c | 82 ++---
drivers/infiniband/hw/mana/wq.c | 21 +-
.../net/ethernet/microsoft/mana/gdma_main.c | 151 ++++++----
drivers/net/ethernet/microsoft/mana/mana_en.c | 3 +
include/net/mana/gdma.h | 16 +-
10 files changed, 529 insertions(+), 248 deletions(-)

--
2.25.1



2023-07-26 20:30:25

by sharmaajay

[permalink] [raw]
Subject: [Patch v3 1/4] RDMA/mana_ib : Rename all mana_ib_dev type variables to mib_dev

From: Ajay Sharma <[email protected]>

This patch does not introduce any functional changes. It
establishes a naming convention so the two device types can
be distinguished, especially when both are used in the same
function: all mana_ib_dev type variables are renamed to
mib_dev, keeping a clean separation between the eth dev and
ibdev variables.
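
As a condensed, illustrative sketch (not buildable on its own, and
example_probe() is a made-up name), the pattern after the rename looks
like the mana_ib_probe() hunk below: the eth-side gdma_dev keeps the
short mdev name while the IB device is always mib_dev, so the two are
easy to tell apart within one function.

	/* Illustrative only: both device types side by side after the rename. */
	static int example_probe(struct gdma_dev *mdev)
	{
		struct mana_ib_dev *mib_dev;

		mib_dev = ib_alloc_device(mana_ib_dev, ib_dev);
		if (!mib_dev)
			return -ENOMEM;

		mib_dev->gdma_dev = mdev;			/* eth-side handle */
		mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;	/* IB-side attribute */
		return 0;
	}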

Signed-off-by: Ajay Sharma <[email protected]>
---
drivers/infiniband/hw/mana/cq.c | 12 ++--
drivers/infiniband/hw/mana/device.c | 34 +++++------
drivers/infiniband/hw/mana/main.c | 87 ++++++++++++++--------------
drivers/infiniband/hw/mana/mana_ib.h | 9 +--
drivers/infiniband/hw/mana/mr.c | 29 +++++-----
drivers/infiniband/hw/mana/qp.c | 82 +++++++++++++-------------
drivers/infiniband/hw/mana/wq.c | 21 +++----
7 files changed, 140 insertions(+), 134 deletions(-)

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index d141cab8a1e6..1aed4e6360ba 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -11,10 +11,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;
int err;

- mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

if (udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -41,7 +41,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}

- err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_gd_create_dma_region(mib_dev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@@ -68,11 +68,11 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;

- mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

- mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+ mana_ib_gd_destroy_dma_region(mib_dev, cq->gdma_region);
ib_umem_release(cq->umem);

return 0;
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index d4541b8707e4..083f27246ba8 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -51,51 +51,51 @@ static int mana_ib_probe(struct auxiliary_device *adev,
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
struct gdma_dev *mdev = madev->mdev;
+ struct mana_ib_dev *mib_dev;
struct mana_context *mc;
- struct mana_ib_dev *dev;
int ret;

mc = mdev->driver_data;

- dev = ib_alloc_device(mana_ib_dev, ib_dev);
- if (!dev)
+ mib_dev = ib_alloc_device(mana_ib_dev, ib_dev);
+ if (!mib_dev)
return -ENOMEM;

- ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
+ ib_set_device_ops(&mib_dev->ib_dev, &mana_ib_dev_ops);

- dev->ib_dev.phys_port_cnt = mc->num_ports;
+ mib_dev->ib_dev.phys_port_cnt = mc->num_ports;

- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+ ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);

- dev->gdma_dev = mdev;
- dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ mib_dev->gdma_dev = mdev;
+ mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;

/*
* num_comp_vectors needs to set to the max MSIX index
* when interrupts and event queues are implemented
*/
- dev->ib_dev.num_comp_vectors = 1;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
+ mib_dev->ib_dev.num_comp_vectors = 1;
+ mib_dev->ib_dev.dev.parent = mdev->gdma_context->dev;

- ret = ib_register_device(&dev->ib_dev, "mana_%d",
+ ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret) {
- ib_dealloc_device(&dev->ib_dev);
+ ib_dealloc_device(&mib_dev->ib_dev);
return ret;
}

- dev_set_drvdata(&adev->dev, dev);
+ dev_set_drvdata(&adev->dev, mib_dev);

return 0;
}

static void mana_ib_remove(struct auxiliary_device *adev)
{
- struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+ struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);

- ib_unregister_device(&dev->ib_dev);
- ib_dealloc_device(&dev->ib_dev);
+ ib_unregister_device(&mib_dev->ib_dev);
+ ib_dealloc_device(&mib_dev->ib_dev);
}

static const struct auxiliary_device_id mana_id_table[] = {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 7be4c3adb4e2..189e774cdab6 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -5,10 +5,10 @@

#include "mana_ib.h"

-void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
+void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct mana_ib_pd *pd,
u32 port)
{
- struct gdma_dev *gd = dev->gdma_dev;
+ struct gdma_dev *gd = mib_dev->gdma_dev;
struct mana_port_context *mpc;
struct net_device *ndev;
struct mana_context *mc;
@@ -28,10 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
mutex_unlock(&pd->vport_mutex);
}

-int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
+int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port,
+ struct mana_ib_pd *pd,
u32 doorbell_id)
{
- struct gdma_dev *mdev = dev->gdma_dev;
+ struct gdma_dev *mdev = mib_dev->gdma_dev;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
@@ -45,7 +46,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,

pd->vport_use_count++;
if (pd->vport_use_count > 1) {
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Skip as this PD is already configured vport\n");
mutex_unlock(&pd->vport_mutex);
return 0;
@@ -56,7 +57,8 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
pd->vport_use_count--;
mutex_unlock(&pd->vport_mutex);

- ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
+ ibdev_dbg(&mib_dev->ib_dev, "Failed to configure vPort %d\n",
+ err);
return err;
}

@@ -65,7 +67,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
pd->tx_vp_offset = mpc->tx_vp_offset;

- ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
+ ibdev_dbg(&mib_dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
mpc->port_handle, pd->pdn, doorbell_id);

return 0;
@@ -78,12 +80,12 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_create_pd_resp resp = {};
struct gdma_create_pd_req req = {};
enum gdma_pd_flags flags = 0;
- struct mana_ib_dev *dev;
+ struct mana_ib_dev *mib_dev;
struct gdma_dev *mdev;
int err;

- dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mdev = mib_dev->gdma_dev;

mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
@@ -93,7 +95,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
sizeof(resp), &resp);

if (err || resp.hdr.status) {
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to get pd_id err %d status %u\n", err,
resp.hdr.status);
if (!err)
@@ -104,7 +106,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)

pd->pd_handle = resp.pd_handle;
pd->pdn = resp.pd_id;
- ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
+ ibdev_dbg(&mib_dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
pd->pd_handle, pd->pdn);

mutex_init(&pd->vport_mutex);
@@ -118,12 +120,12 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct ib_device *ibdev = ibpd->device;
struct gdma_destory_pd_resp resp = {};
struct gdma_destroy_pd_req req = {};
- struct mana_ib_dev *dev;
+ struct mana_ib_dev *mib_dev;
struct gdma_dev *mdev;
int err;

- dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mdev = mib_dev->gdma_dev;

mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
sizeof(resp));
@@ -133,7 +135,7 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
sizeof(resp), &resp);

if (err || resp.hdr.status) {
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to destroy pd_handle 0x%llx err %d status %u",
pd->pd_handle, err, resp.hdr.status);
if (!err)
@@ -204,14 +206,14 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct mana_ib_ucontext *ucontext =
container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
struct ib_device *ibdev = ibcontext->device;
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;
struct gdma_context *gc;
struct gdma_dev *dev;
int doorbell_page;
int ret;

- mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- dev = mdev->gdma_dev;
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ dev = mib_dev->gdma_dev;
gc = dev->gdma_context;

/* Allocate a doorbell page index */
@@ -233,12 +235,12 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mana_ib_ucontext *mana_ucontext =
container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
struct ib_device *ibdev = ibcontext->device;
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;
struct gdma_context *gc;
int ret;

- mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mib_dev->gdma_dev->gdma_context;

ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
if (ret)
@@ -246,7 +248,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
}

static int
-mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
+mana_ib_gd_first_dma_region(struct mana_ib_dev *mib_dev,
struct gdma_context *gc,
struct gdma_create_dma_region_req *create_req,
size_t num_pages, mana_handle_t *gdma_region,
@@ -263,7 +265,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
err = mana_gd_send_request(gc, create_req_msg_size, create_req,
sizeof(create_resp), &create_resp);
if (err || create_resp.hdr.status != expected_status) {
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to create DMA region: %d, 0x%x\n",
err, create_resp.hdr.status);
if (!err)
@@ -273,14 +275,14 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
}

*gdma_region = create_resp.dma_region_handle;
- ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
+ ibdev_dbg(&mib_dev->ib_dev, "Created DMA region handle 0x%llx\n",
*gdma_region);

return 0;
}

static int
-mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
+mana_ib_gd_add_dma_region(struct mana_ib_dev *mib_dev, struct gdma_context *gc,
struct gdma_dma_region_add_pages_req *add_req,
unsigned int num_pages, u32 expected_status)
{
@@ -296,7 +298,7 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
err = mana_gd_send_request(gc, add_req_msg_size, add_req,
sizeof(add_resp), &add_resp);
if (err || add_resp.hdr.status != expected_status) {
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to create DMA region: %d, 0x%x\n",
err, add_resp.hdr.status);

@@ -309,7 +311,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}

-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
+ struct ib_umem *umem,
mana_handle_t *gdma_region)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
@@ -329,14 +332,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
void *request_buf;
int err;

- mdev = dev->gdma_dev;
+ mdev = mib_dev->gdma_dev;
gc = mdev->gdma_context;
hwc = gc->hwc.driver_data;

/* Hardware requires dma region to align to chosen page size */
page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
if (!page_sz) {
- ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
+ ibdev_dbg(&mib_dev->ib_dev, "failed to find page size.\n");
return -ENOMEM;
}
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
@@ -362,13 +365,13 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->page_count = num_pages_total;

- ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
+ ibdev_dbg(&mib_dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
umem->length, num_pages_total);

- ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
+ ibdev_dbg(&mib_dev->ib_dev, "page_sz %lu offset_in_page %u\n",
page_sz, create_req->offset_in_page);

- ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
+ ibdev_dbg(&mib_dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
num_pages_to_handle, create_req->gdma_page_type);

page_addr_list = create_req->page_addr_list;
@@ -385,7 +388,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,

if (!num_pages_processed) {
/* First create message */
- err = mana_ib_gd_first_dma_region(dev, gc, create_req,
+ err = mana_ib_gd_first_dma_region(mib_dev, gc, create_req,
tail, gdma_region,
expected_status);
if (err)
@@ -400,7 +403,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
page_addr_list = add_req->page_addr_list;
} else {
/* Subsequent create messages */
- err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
+ err = mana_ib_gd_add_dma_region(mib_dev, gc, add_req, tail,
expected_status);
if (err)
break;
@@ -417,20 +420,20 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
}

if (err)
- mana_ib_gd_destroy_dma_region(dev, *gdma_region);
+ mana_ib_gd_destroy_dma_region(mib_dev, *gdma_region);

out:
kfree(request_buf);
return err;
}

-int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
+int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev, u64 gdma_region)
{
- struct gdma_dev *mdev = dev->gdma_dev;
+ struct gdma_dev *mdev = mib_dev->gdma_dev;
struct gdma_context *gc;

gc = mdev->gdma_context;
- ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
+ ibdev_dbg(&mib_dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

return mana_gd_destroy_dma_region(gc, gdma_region);
}
@@ -440,14 +443,14 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
struct mana_ib_ucontext *mana_ucontext =
container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
struct ib_device *ibdev = ibcontext->device;
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;
struct gdma_context *gc;
phys_addr_t pfn;
pgprot_t prot;
int ret;

- mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mib_dev->gdma_dev->gdma_context;

if (vma->vm_pgoff != 0) {
ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 502cc8672eef..ee4efd0af278 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -92,10 +92,11 @@ struct mana_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_ind_table;
};

-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
+ struct ib_umem *umem,
mana_handle_t *gdma_region);

-int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
+int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev,
mana_handle_t gdma_region);

struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
@@ -129,9 +130,9 @@ int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);

-int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
+int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port_id,
struct mana_ib_pd *pd, u32 doorbell_id);
-void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
+void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct mana_ib_pd *pd,
u32 port);

int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 351207c60eb6..f6a53906204d 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -25,12 +25,13 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
return flags;
}

-static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
+static int mana_ib_gd_create_mr(struct mana_ib_dev *mib_dev,
+ struct mana_ib_mr *mr,
struct gdma_create_mr_params *mr_params)
{
+ struct gdma_dev *mdev = mib_dev->gdma_dev;
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
int err;

@@ -49,7 +50,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
break;

default:
- ibdev_dbg(&dev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
req.mr_type);
return -EINVAL;
@@ -58,7 +59,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

if (err || resp.hdr.status) {
- ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
+ ibdev_dbg(&mib_dev->ib_dev, "Failed to create mr %d, %u", err,
resp.hdr.status);
if (!err)
err = -EPROTO;
@@ -73,11 +74,11 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
return 0;
}

-static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
+static int mana_ib_gd_destroy_mr(struct mana_ib_dev *mib_dev, u64 mr_handle)
{
struct gdma_destroy_mr_response resp = {};
+ struct gdma_dev *mdev = mib_dev->gdma_dev;
struct gdma_destroy_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
int err;

@@ -107,12 +108,12 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
struct gdma_create_mr_params mr_params = {};
struct ib_device *ibdev = ibpd->device;
- struct mana_ib_dev *dev;
+ struct mana_ib_dev *mib_dev;
struct mana_ib_mr *mr;
u64 dma_region_handle;
int err;

- dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

ibdev_dbg(ibdev,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
@@ -133,7 +134,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}

- err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+ err = mana_ib_gd_create_dma_region(mib_dev, mr->umem, &dma_region_handle);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@@ -151,7 +152,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
mr_params.gva.access_flags =
mana_ib_verbs_to_gdma_access_flags(access_flags);

- err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ err = mana_ib_gd_create_mr(mib_dev, mr, &mr_params);
if (err)
goto err_dma_region;

@@ -164,7 +165,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
return &mr->ibmr;

err_dma_region:
- mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
+ mana_gd_destroy_dma_region(mib_dev->gdma_dev->gdma_context,
dma_region_handle);

err_umem:
@@ -179,12 +180,12 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
struct ib_device *ibdev = ibmr->device;
- struct mana_ib_dev *dev;
+ struct mana_ib_dev *mib_dev;
int err;

- dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

- err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
+ err = mana_ib_gd_destroy_mr(mib_dev, mr->mr_handle);
if (err)
return err;

diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 4b3b5b274e84..2e3a57123ed7 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -5,7 +5,7 @@

#include "mana_ib.h"

-static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
+static int mana_ib_cfg_vport_steering(struct mana_ib_dev *mib_dev,
struct net_device *ndev,
mana_handle_t default_rxobj,
mana_handle_t ind_table[],
@@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
u32 req_buf_size;
int i, err;

- mdev = dev->gdma_dev;
+ mdev = mib_dev->gdma_dev;
gc = mdev->gdma_context;

req_buf_size =
@@ -55,10 +55,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
* MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
* ind_table to MANA_INDIRECT_TABLE_SIZE if required
*/
- ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
+ ibdev_dbg(&mib_dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
- ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
+ ibdev_dbg(&mib_dev->ib_dev, "index %u handle 0x%llx\n", i,
req_indir_tab[i]);
}

@@ -68,7 +68,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
else
netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

- ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
+ ibdev_dbg(&mib_dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
req->vport, default_rxobj);

err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
@@ -97,12 +97,12 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct ib_udata *udata)
{
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct mana_ib_dev *mdev =
+ struct mana_ib_dev *mib_dev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = mib_dev->gdma_dev;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
struct mana_context *mc;
@@ -123,21 +123,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,

ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (ret) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed copy from udata for create rss-qp, err %d\n",
ret);
return ret;
}

if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Requested max_recv_wr %d exceeding limit\n",
attr->cap.max_recv_wr);
return -EINVAL;
}

if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Requested max_recv_sge %d exceeding limit\n",
attr->cap.max_recv_sge);
return -EINVAL;
@@ -145,14 +145,14 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,

ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Indirect table size %d exceeding limit\n",
ind_tbl_size);
return -EINVAL;
}

if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"RX Hash function is not supported, %d\n",
ucmd.rx_hash_function);
return -EINVAL;
@@ -161,14 +161,14 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* IB ports start with 1, MANA start with 0 */
port = ucmd.port;
if (port < 1 || port > mc->num_ports) {
- ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ ibdev_dbg(&mib_dev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);

- ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
+ ibdev_dbg(&mib_dev->ib_dev, "rx_hash_function %d port %d\n",
ucmd.rx_hash_function, port);

mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
@@ -210,7 +210,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
wq->id = wq_spec.queue_index;
cq->id = cq_spec.queue_index;

- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
ret, wq->rx_object, wq->id, cq->id);

@@ -221,7 +221,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
resp.num_entries = i;

- ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
+ ret = mana_ib_cfg_vport_steering(mib_dev, ndev, wq->rx_object,
mana_ind_table,
ind_tbl->log_ind_tbl_size,
ucmd.rx_hash_key_len,
@@ -231,7 +231,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,

ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to copy to udata create rss-qp, %d\n",
ret);
goto fail;
@@ -259,7 +259,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct mana_ib_dev *mdev =
+ struct mana_ib_dev *mib_dev =
container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_cq *send_cq =
container_of(attr->send_cq, struct mana_ib_cq, ibcq);
@@ -267,7 +267,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
struct mana_ib_create_qp_resp resp = {};
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = mib_dev->gdma_dev;
struct mana_ib_create_qp ucmd = {};
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
@@ -285,7 +285,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,

err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to copy from udata create qp-raw, %d\n", err);
return err;
}
@@ -296,14 +296,14 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return -EINVAL;

if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Requested max_send_wr %d exceeding limit\n",
attr->cap.max_send_wr);
return -EINVAL;
}

if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Requested max_send_sge %d exceeding limit\n",
attr->cap.max_send_sge);
return -EINVAL;
@@ -311,38 +311,38 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,

ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);
- ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
+ ibdev_dbg(&mib_dev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

- err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
+ err = mana_ib_cfg_vport(mib_dev, port - 1, pd, mana_ucontext->doorbell);
if (err)
return -ENODEV;

qp->port = port;

- ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
+ ibdev_dbg(&mib_dev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
ucmd.sq_buf_addr, ucmd.port);

umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to get umem for create qp-raw, err %d\n",
err);
goto err_free_vport;
}
qp->sq_umem = umem;

- err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
+ err = mana_ib_gd_create_dma_region(mib_dev, qp->sq_umem,
&qp->sq_gdma_region);
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
err);
goto err_release_umem;
}

- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);

@@ -358,7 +358,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
&cq_spec, &qp->tx_object);
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to create wq for create raw-qp, err %d\n",
err);
goto err_destroy_dma_region;
@@ -371,7 +371,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
qp->sq_id = wq_spec.queue_index;
send_cq->id = cq_spec.queue_index;

- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
qp->tx_object, qp->sq_id, send_cq->id);

@@ -381,7 +381,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,

err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed copy udata for create qp-raw, %d\n",
err);
goto err_destroy_wq_obj;
@@ -393,13 +393,13 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
+ mana_ib_gd_destroy_dma_region(mib_dev, qp->sq_gdma_region);

err_release_umem:
ib_umem_release(umem);

err_free_vport:
- mana_ib_uncfg_vport(mdev, pd, port - 1);
+ mana_ib_uncfg_vport(mib_dev, pd, port - 1);

return err;
}
@@ -435,9 +435,9 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
struct ib_rwq_ind_table *ind_tbl,
struct ib_udata *udata)
{
- struct mana_ib_dev *mdev =
+ struct mana_ib_dev *mib_dev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = mib_dev->gdma_dev;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
@@ -452,7 +452,7 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
ibwq = ind_tbl->ind_tbl[i];
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
- ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
+ ibdev_dbg(&mib_dev->ib_dev, "destroying wq->rx_object %llu\n",
wq->rx_object);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
}
@@ -462,9 +462,9 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,

static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
- struct mana_ib_dev *mdev =
+ struct mana_ib_dev *mib_dev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = mib_dev->gdma_dev;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
struct mana_context *mc;
@@ -479,11 +479,11 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

if (qp->sq_umem) {
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
+ mana_ib_gd_destroy_dma_region(mib_dev, qp->sq_gdma_region);
ib_umem_release(qp->sq_umem);
}

- mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
+ mana_ib_uncfg_vport(mib_dev, pd, qp->port - 1);

return 0;
}
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 372d361510e0..56bc2b8b6690 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -9,7 +9,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata)
{
- struct mana_ib_dev *mdev =
+ struct mana_ib_dev *mib_dev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
@@ -21,7 +21,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,

err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to copy from udata for create wq, %d\n", err);
return ERR_PTR(err);
}
@@ -30,13 +30,14 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
if (!wq)
return ERR_PTR(-ENOMEM);

- ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
+ ibdev_dbg(&mib_dev->ib_dev, "ucmd wq_buf_addr 0x%llx\n",
+ ucmd.wq_buf_addr);

umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to get umem for create wq, err %d\n", err);
goto err_free_wq;
}
@@ -46,15 +47,15 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;

- err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+ err = mana_ib_gd_create_dma_region(mib_dev, wq->umem, &wq->gdma_region);
if (err) {
- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"Failed to create dma region for create wq, %d\n",
err);
goto err_release_umem;
}

- ibdev_dbg(&mdev->ib_dev,
+ ibdev_dbg(&mib_dev->ib_dev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);

@@ -82,11 +83,11 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq);
struct ib_device *ib_dev = ibwq->device;
- struct mana_ib_dev *mdev;
+ struct mana_ib_dev *mib_dev;

- mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
+ mib_dev = container_of(ib_dev, struct mana_ib_dev, ib_dev);

- mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
+ mana_ib_gd_destroy_dma_region(mib_dev, wq->gdma_region);
ib_umem_release(wq->umem);

kfree(wq);
--
2.25.1


2023-07-26 20:32:53

by sharmaajay

[permalink] [raw]
Subject: [Patch v3 3/4] RDMA/mana_ib : Create adapter and Add error eq

From: Ajay Sharma <[email protected]>

Create an adapter object as a container for VF resources.
Add the error EQ needed for adapter creation, which is later
used for notifications from the management software. The
management software uses this channel to send messages or
error notifications back to the client.
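
As a rough sketch of the resulting probe-time ordering (error
unwinding trimmed; the full version is in the device.c hunk below),
the error EQ is created first so its id can be handed to the
management software when the adapter object is created:

	/* Sketch: ordering added to mana_ib_probe(), cleanup paths omitted. */
	ret = mana_ib_create_error_eq(mib_dev);	/* EQ used for SoC notifications */
	if (ret)
		goto deregister_device;

	ret = mana_ib_create_adapter(mib_dev);	/* passes fatal_err_eq->id to mgmt SW */
	if (ret)
		goto free_error_eq;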

Signed-off-by: Ajay Sharma <[email protected]>
---
drivers/infiniband/hw/mana/device.c | 22 ++-
drivers/infiniband/hw/mana/main.c | 95 ++++++++++++
drivers/infiniband/hw/mana/mana_ib.h | 33 ++++
.../net/ethernet/microsoft/mana/gdma_main.c | 146 ++++++++++--------
drivers/net/ethernet/microsoft/mana/mana_en.c | 3 +
include/net/mana/gdma.h | 13 +-
6 files changed, 242 insertions(+), 70 deletions(-)

diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index ea4c8c8fc10d..4077e440657a 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -68,7 +68,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);

- mib_dev->gdma_dev = mdev;
+ mib_dev->gc = mdev->gdma_context;
mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;

/*
@@ -85,15 +85,31 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto free_ib_device;
}

+ ret = mana_ib_create_error_eq(mib_dev);
+ if (ret) {
+ ibdev_err(&mib_dev->ib_dev, "Failed to allocate err eq");
+ goto deregister_device;
+ }
+
+ ret = mana_ib_create_adapter(mib_dev);
+ if (ret) {
+ ibdev_err(&mib_dev->ib_dev, "Failed to create adapter");
+ goto free_error_eq;
+ }
+
ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret)
- goto deregister_device;
+ goto destroy_adapter;

dev_set_drvdata(&adev->dev, mib_dev);

return 0;

+destroy_adapter:
+ mana_ib_destroy_adapter(mib_dev);
+free_error_eq:
+ mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
deregister_device:
mana_gd_deregister_device(&mib_dev->gc->mana_ib);
free_ib_device:
@@ -105,6 +121,8 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);

+ mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
+ mana_ib_destroy_adapter(mib_dev);
mana_gd_deregister_device(&mib_dev->gc->mana_ib);
ib_unregister_device(&mib_dev->ib_dev);
ib_dealloc_device(&mib_dev->ib_dev);
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 2c4e3c496644..1b1a8670d0fa 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -504,3 +504,98 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
+
+int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev)
+{
+ struct mana_ib_destroy_adapter_resp resp = {};
+ struct mana_ib_destroy_adapter_req req = {};
+ struct gdma_context *gc;
+ int err;
+
+ gc = mib_dev->gc;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req),
+ sizeof(resp));
+ req.adapter = mib_dev->adapter_handle;
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mib_dev->ib_dev, "Failed to destroy adapter err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_create_adapter(struct mana_ib_dev *mib_dev)
+{
+ struct mana_ib_create_adapter_resp resp = {};
+ struct mana_ib_create_adapter_req req = {};
+ struct gdma_context *gc;
+ int err;
+
+ gc = mib_dev->gc;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req),
+ sizeof(resp));
+ req.notify_eq_id = mib_dev->fatal_err_eq->id;
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mib_dev->ib_dev, "Failed to create adapter err %d",
+ err);
+ return err;
+ }
+
+ mib_dev->adapter_handle = resp.adapter;
+
+ return 0;
+}
+
+static void mana_ib_soc_event_handler(void *ctx, struct gdma_queue *queue,
+ struct gdma_event *event)
+{
+ struct mana_ib_dev *mib_dev = (struct mana_ib_dev *)ctx;
+
+ switch (event->type) {
+ case GDMA_EQE_SOC_EVENT_NOTIFICATION:
+ ibdev_info(&mib_dev->ib_dev, "Received SOC Notification");
+ break;
+ case GDMA_EQE_SOC_EVENT_TEST:
+ ibdev_info(&mib_dev->ib_dev, "Received SoC Test");
+ break;
+ default:
+ ibdev_dbg(&mib_dev->ib_dev, "Received unsolicited evt %d",
+ event->type);
+ }
+}
+
+int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev)
+{
+ struct gdma_queue_spec spec = {};
+ int err;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = mana_ib_soc_event_handler;
+ spec.eq.context = mib_dev;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.msix_allocated = true;
+ spec.eq.msix_index = 0;
+ spec.doorbell = mib_dev->gc->mana_ib.doorbell;
+ spec.pdid = mib_dev->gc->mana_ib.pdid;
+
+ err = mana_gd_create_mana_eq(&mib_dev->gc->mana_ib, &spec,
+ &mib_dev->fatal_err_eq);
+ if (err)
+ return err;
+
+ mib_dev->fatal_err_eq->eq.disable_needed = true;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 3a2ba6b96f15..8a652bccd978 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -31,6 +31,8 @@ struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
struct gdma_context *gc;
+ struct gdma_queue *fatal_err_eq;
+ mana_handle_t adapter_handle;
};

struct mana_ib_wq {
@@ -93,6 +95,31 @@ struct mana_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_ind_table;
};

+enum mana_ib_command_code {
+ MANA_IB_CREATE_ADAPTER = 0x30002,
+ MANA_IB_DESTROY_ADAPTER = 0x30003,
+};
+
+struct mana_ib_create_adapter_req {
+ struct gdma_req_hdr hdr;
+ u32 notify_eq_id;
+ u32 reserved;
+}; /*HW Data */
+
+struct mana_ib_create_adapter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_ib_destroy_adapter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /*HW Data */
+
+struct mana_ib_destroy_adapter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
struct ib_umem *umem,
mana_handle_t *gdma_region);
@@ -161,4 +188,10 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);

+int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev);
+
+int mana_ib_create_adapter(struct mana_ib_dev *mib_dev);
+
+int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
+
#endif
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 9fa7a2d6c2b2..55e194c9d84e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -185,7 +185,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi)
}

static int mana_gd_create_hw_eq(struct gdma_context *gc,
- struct gdma_queue *queue)
+ struct gdma_queue *queue,
+ u32 doorbell, u32 pdid)
{
struct gdma_create_queue_resp resp = {};
struct gdma_create_queue_req req = {};
@@ -199,8 +200,8 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,

req.hdr.dev_id = queue->gdma_dev->dev_id;
req.type = queue->type;
- req.pdid = queue->gdma_dev->pdid;
- req.doolbell_id = queue->gdma_dev->doorbell;
+ req.pdid = pdid;
+ req.doolbell_id = doorbell;
req.gdma_region = queue->mem_info.dma_region_handle;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
@@ -371,53 +372,51 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
}
}

-static void mana_gd_process_eq_events(void *arg)
+static void mana_gd_process_eq_events(struct list_head *eq_list)
{
u32 owner_bits, new_bits, old_bits;
union gdma_eqe_info eqe_info;
struct gdma_eqe *eq_eqe_ptr;
- struct gdma_queue *eq = arg;
+ struct gdma_queue *eq;
struct gdma_context *gc;
struct gdma_eqe *eqe;
u32 head, num_eqe;
int i;

- gc = eq->gdma_dev->gdma_context;
-
- num_eqe = eq->queue_size / GDMA_EQE_SIZE;
- eq_eqe_ptr = eq->queue_mem_ptr;
-
- /* Process up to 5 EQEs at a time, and update the HW head. */
- for (i = 0; i < 5; i++) {
- eqe = &eq_eqe_ptr[eq->head % num_eqe];
- eqe_info.as_uint32 = eqe->eqe_info;
- owner_bits = eqe_info.owner_bits;
-
- old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
- /* No more entries */
- if (owner_bits == old_bits)
- break;
-
- new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
- if (owner_bits != new_bits) {
- dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
- break;
+ list_for_each_entry_rcu(eq, eq_list, entry) {
+ gc = eq->gdma_dev->gdma_context;
+
+ num_eqe = eq->queue_size / GDMA_EQE_SIZE;
+ eq_eqe_ptr = eq->queue_mem_ptr;
+ /* Process up to 5 EQEs at a time, and update the HW head. */
+ for (i = 0; i < 5; i++) {
+ eqe = &eq_eqe_ptr[eq->head % num_eqe];
+ eqe_info.as_uint32 = eqe->eqe_info;
+ owner_bits = eqe_info.owner_bits;
+
+ old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
+ /* No more entries */
+ if (owner_bits == old_bits)
+ break;
+
+ new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
+ if (owner_bits != new_bits) {
+ dev_err(gc->dev, "EQ %d: overflow detected\n",
+ eq->id);
+ break;
+ }
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+ mana_gd_process_eqe(eq);
+ eq->head++;
}

- /* Per GDMA spec, rmb is necessary after checking owner_bits, before
- * reading eqe.
- */
- rmb();
-
- mana_gd_process_eqe(eq);
-
- eq->head++;
+ head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
+ mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type,
+ eq->id, head, SET_ARM_BIT);
}
-
- head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
-
- mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
- head, SET_ARM_BIT);
}

static int mana_gd_register_irq(struct gdma_queue *queue,
@@ -435,44 +434,47 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
gc = gd->gdma_context;
r = &gc->msix_resource;
dev = gc->dev;
+ msi_index = spec->eq.msix_index;

spin_lock_irqsave(&r->lock, flags);

- msi_index = find_first_zero_bit(r->map, r->size);
- if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
- err = -ENOSPC;
- } else {
- bitmap_set(r->map, msi_index, 1);
- queue->eq.msix_index = msi_index;
- }
-
- spin_unlock_irqrestore(&r->lock, flags);
+ if (!spec->eq.msix_allocated) {
+ msi_index = find_first_zero_bit(r->map, r->size);

- if (err) {
- dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
- err, msi_index, r->size, gc->num_msix_usable);
+ if (msi_index >= r->size ||
+ msi_index >= gc->num_msix_usable)
+ err = -ENOSPC;
+ else
+ bitmap_set(r->map, msi_index, 1);

- return err;
+ if (err) {
+ dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
+ err, msi_index, r->size, gc->num_msix_usable);
+ goto out;
+ }
}

+ queue->eq.msix_index = msi_index;
gic = &gc->irq_contexts[msi_index];

- WARN_ON(gic->handler || gic->arg);
-
- gic->arg = queue;
+ list_add_rcu(&queue->entry, &gic->eq_list);

gic->handler = mana_gd_process_eq_events;

- return 0;
+out:
+ spin_unlock_irqrestore(&r->lock, flags);
+ return err;
}

-static void mana_gd_deregiser_irq(struct gdma_queue *queue)
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
struct gdma_resource *r;
unsigned int msix_index;
+ struct list_head *p, *n;
+ struct gdma_queue *eq;
unsigned long flags;

gc = gd->gdma_context;
@@ -483,14 +485,23 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;

+ spin_lock_irqsave(&r->lock, flags);
+
gic = &gc->irq_contexts[msix_index];
- gic->handler = NULL;
- gic->arg = NULL;
+ list_for_each_safe(p, n, &gic->eq_list) {
+ eq = list_entry(p, struct gdma_queue, entry);
+ if (queue == eq) {
+ list_del(&eq->entry);
+ break;
+ }
+ }

- spin_lock_irqsave(&r->lock, flags);
- bitmap_clear(r->map, msix_index, 1);
- spin_unlock_irqrestore(&r->lock, flags);
+ if (list_empty(&gic->eq_list)) {
+ gic->handler = NULL;
+ bitmap_clear(r->map, msix_index, 1);
+ }

+ spin_unlock_irqrestore(&r->lock, flags);
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
}

@@ -553,7 +564,7 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}

- mana_gd_deregiser_irq(queue);
+ mana_gd_deregister_irq(queue);

if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
@@ -568,7 +579,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
u32 log2_num_entries;
int err;

- queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ queue->eq.msix_index = spec->eq.msix_index;

log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

@@ -590,7 +601,8 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

if (create_hwq) {
- err = mana_gd_create_hw_eq(gc, queue);
+ err = mana_gd_create_hw_eq(gc, queue,
+ spec->doorbell, spec->pdid);
if (err)
goto out;

@@ -800,6 +812,7 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
kfree(queue);
return err;
}
+EXPORT_SYMBOL(mana_gd_create_mana_eq);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -876,6 +889,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
+EXPORT_SYMBOL(mana_gd_destroy_queue);

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -1193,7 +1207,7 @@ static irqreturn_t mana_gd_intr(int irq, void *arg)
struct gdma_irq_context *gic = arg;

if (gic->handler)
- gic->handler(gic->arg);
+ gic->handler(&gic->eq_list);

return IRQ_HANDLED;
}
@@ -1246,7 +1260,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
gic->handler = NULL;
- gic->arg = NULL;
+ INIT_LIST_HEAD(&gic->eq_list);

if (!i)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..d2ba7de8b512 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1167,6 +1167,9 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.callback = NULL;
spec.eq.context = ac->eqs;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.msix_allocated = false;
+ spec.doorbell = gd->doorbell;
+ spec.pdid = gd->pdid;

for (i = 0; i < gc->max_num_queues; i++) {
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index e2b212dd722b..aee8e8fa1ea6 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -57,6 +57,10 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
+
+ /* IB NiC Events start at 176*/
+ GDMA_EQE_SOC_EVENT_NOTIFICATION = 176,
+ GDMA_EQE_SOC_EVENT_TEST,
};

enum {
@@ -291,6 +295,7 @@ struct gdma_queue {

u32 head;
u32 tail;
+ struct list_head entry;

/* Extra fields specific to EQ/CQ. */
union {
@@ -318,6 +323,8 @@ struct gdma_queue_spec {
enum gdma_queue_type type;
bool monitor_avl_buf;
unsigned int queue_size;
+ u32 doorbell;
+ u32 pdid;

/* Extra fields specific to EQ/CQ. */
union {
@@ -326,6 +333,8 @@ struct gdma_queue_spec {
void *context;

unsigned long log2_throttle_limit;
+ bool msix_allocated;
+ unsigned int msix_index;
} eq;

struct {
@@ -341,8 +350,8 @@ struct gdma_queue_spec {
#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
- void (*handler)(void *arg);
- void *arg;
+ void (*handler)(struct list_head *arg);
+ struct list_head eq_list;
char name[MANA_IRQ_NAME_SZ];
};

--
2.25.1


2023-07-26 20:41:59

by sharmaajay

[permalink] [raw]
Subject: [Patch v3 4/4] RDMA/mana_ib : Query adapter capabilities

From: Ajay Sharma <[email protected]>

Query the adapter capabilities so they can be exposed to
other clients and the VF. User-supplied values are checked
against these capabilities to protect against overflows.
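
As a condensed sketch (trimmed from the mana_ib_query_device() hunk
below), the queried capabilities override the compile-time defaults
only when the management software supports adapter creation:

	/* Sketch: prefer queried caps over static defaults when available. */
	props->max_qp = MANA_MAX_NUM_QUEUES;
	props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
	if (mib_dev->adapter_handle) {	/* mgmt SW supports adapter creation */
		props->max_qp = mib_dev->adapter_caps.max_qp_count;
		props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
	}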

Signed-off-by: Ajay Sharma <[email protected]>
---
drivers/infiniband/hw/mana/device.c | 4 ++
drivers/infiniband/hw/mana/main.c | 66 +++++++++++++++++++++++++---
drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
3 files changed, 115 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 4077e440657a..e15da43c73a0 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto free_error_eq;
}

+ ret = mana_ib_query_adapter_caps(mib_dev);
+ if (ret)
+ ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use defaults");
+
ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret)
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 1b1a8670d0fa..512815e1e64d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -469,21 +469,27 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
+ struct mana_ib_dev *mib_dev = container_of(ibdev,
+ struct mana_ib_dev, ib_dev);
+
props->max_qp = MANA_MAX_NUM_QUEUES;
props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
-
- /*
- * max_cqe could be potentially much bigger.
- * As this version of driver only support RAW QP, set it to the same
- * value as max_qp_wr
- */
props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
-
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
props->max_mr = MANA_IB_MAX_MR;
props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;

+ /* If the Management SW is updated and supports adapter creation */
+ if (mib_dev->adapter_handle) {
+ props->max_qp = mib_dev->adapter_caps.max_qp_count;
+ props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
+ props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
+ props->max_mr = mib_dev->adapter_caps.max_mr_count;
+ props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
+ props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
+ }
+
return 0;
}

@@ -599,3 +605,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev)

return 0;
}
+
+static void assign_caps(struct mana_ib_adapter_caps *caps,
+ struct mana_ib_query_adapter_caps_resp *resp)
+{
+ caps->max_sq_id = resp->max_sq_id;
+ caps->max_rq_id = resp->max_rq_id;
+ caps->max_cq_id = resp->max_cq_id;
+ caps->max_qp_count = resp->max_qp_count;
+ caps->max_cq_count = resp->max_cq_count;
+ caps->max_mr_count = resp->max_mr_count;
+ caps->max_pd_count = resp->max_pd_count;
+ caps->max_inbound_read_limit = resp->max_inbound_read_limit;
+ caps->max_outbound_read_limit = resp->max_outbound_read_limit;
+ caps->mw_count = resp->mw_count;
+ caps->max_srq_count = resp->max_srq_count;
+ caps->max_requester_sq_size = resp->max_requester_sq_size;
+ caps->max_responder_sq_size = resp->max_responder_sq_size;
+ caps->max_requester_rq_size = resp->max_requester_rq_size;
+ caps->max_responder_rq_size = resp->max_responder_rq_size;
+ caps->max_send_wqe_size = resp->max_send_wqe_size;
+ caps->max_recv_wqe_size = resp->max_recv_wqe_size;
+ caps->max_inline_data_size = resp->max_inline_data_size;
+}
+
+int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
+{
+ struct mana_ib_query_adapter_caps_resp resp = {};
+ struct mana_ib_query_adapter_caps_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
+ sizeof(resp));
+ req.hdr.resp.msg_version = MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3;
+ req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
+
+ err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
+ sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ assign_caps(&mib_dev->adapter_caps, &resp);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 8a652bccd978..1044358230d3 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -20,19 +20,41 @@

/* MANA doesn't have any limit for MR size */
#define MANA_IB_MAX_MR_SIZE U64_MAX
-
+#define MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3 3
/*
* The hardware limit of number of MRs is greater than maximum number of MRs
* that can possibly represent in 24 bits
*/
#define MANA_IB_MAX_MR 0xFFFFFFu

+struct mana_ib_adapter_caps {
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_requester_sq_size;
+ u32 max_responder_sq_size;
+ u32 max_requester_rq_size;
+ u32 max_responder_rq_size;
+ u32 max_send_wqe_size;
+ u32 max_recv_wqe_size;
+ u32 max_inline_data_size;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
struct gdma_context *gc;
struct gdma_queue *fatal_err_eq;
mana_handle_t adapter_handle;
+ struct mana_ib_adapter_caps adapter_caps;
};

struct mana_ib_wq {
@@ -96,6 +118,7 @@ struct mana_ib_rwq_ind_table {
};

enum mana_ib_command_code {
+ MANA_IB_GET_ADAPTER_CAP = 0x30001,
MANA_IB_CREATE_ADAPTER = 0x30002,
MANA_IB_DESTROY_ADAPTER = 0x30003,
};
@@ -120,6 +143,32 @@ struct mana_ib_destroy_adapter_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */

+struct mana_ib_query_adapter_caps_req {
+ struct gdma_req_hdr hdr;
+}; /* HW Data */
+
+struct mana_ib_query_adapter_caps_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_requester_sq_size;
+ u32 max_responder_sq_size;
+ u32 max_requester_rq_size;
+ u32 max_responder_rq_size;
+ u32 max_send_wqe_size;
+ u32 max_recv_wqe_size;
+ u32 max_inline_data_size;
+}; /* HW Data */
+
int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
struct ib_umem *umem,
mana_handle_t *gdma_region);
@@ -194,4 +243,6 @@ int mana_ib_create_adapter(struct mana_ib_dev *mib_dev);

int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);

+int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
+
#endif
--
2.25.1


2023-07-28 03:22:27

by Ajay Sharma

[permalink] [raw]
Subject: RE: [EXTERNAL] [Patch v3 0/4] RDMA/mana_ib Read Capabilities

+Long

> -----Original Message-----
> From: [email protected] <[email protected]>
> Sent: Wednesday, July 26, 2023 3:08 PM
> To: Jason Gunthorpe <[email protected]>; Leon Romanovsky <[email protected]>;
> Dexuan Cui <[email protected]>; Wei Liu <[email protected]>; David S.
> Miller <[email protected]>; Eric Dumazet <[email protected]>;
> Jakub Kicinski <[email protected]>; Paolo Abeni <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; Ajay Sharma
> <[email protected]>
> Subject: [EXTERNAL] [Patch v3 0/4] RDMA/mana_ib Read Capabilities
>
> From: Ajay Sharma <[email protected]>
>
> This patch series introduces some cleanup changes and resource control
> changes. The mana and mana_ib devices are used at common places so a
> consistent naming is introduced. Adapter object container to have a common
> point of object release for resources and query the management software to
> prevent resource overflow.
> It also introduces async channel for management to notify the clients in case of
> errors/info.
>
> Ajay Sharma (4):
> RDMA/mana_ib : Rename all mana_ib_dev type variables to mib_dev
> RDMA/mana_ib : Register Mana IB device with Management SW
> RDMA/mana_ib : Create adapter and Add error eq
> RDMA/mana_ib : Query adapter capabilities
>
> drivers/infiniband/hw/mana/cq.c | 12 +-
> drivers/infiniband/hw/mana/device.c | 72 +++--
> drivers/infiniband/hw/mana/main.c | 282 +++++++++++++-----
> drivers/infiniband/hw/mana/mana_ib.h | 96 +++++-
> drivers/infiniband/hw/mana/mr.c | 42 ++-
> drivers/infiniband/hw/mana/qp.c | 82 ++---
> drivers/infiniband/hw/mana/wq.c | 21 +-
> .../net/ethernet/microsoft/mana/gdma_main.c | 151 ++++++----
> drivers/net/ethernet/microsoft/mana/mana_en.c | 3 +
> include/net/mana/gdma.h | 16 +-
> 10 files changed, 529 insertions(+), 248 deletions(-)
>
> --
> 2.25.1


2023-07-28 03:42:00

by Ajay Sharma

[permalink] [raw]
Subject: RE: [EXTERNAL] [Patch v3 3/4] RDMA/mana_ib : Create adapter and Add error eq

+Long

> -----Original Message-----
> From: [email protected] <[email protected]>
> Sent: Wednesday, July 26, 2023 3:08 PM
> To: Jason Gunthorpe <[email protected]>; Leon Romanovsky <[email protected]>;
> Dexuan Cui <[email protected]>; Wei Liu <[email protected]>; David S.
> Miller <[email protected]>; Eric Dumazet <[email protected]>;
> Jakub Kicinski <[email protected]>; Paolo Abeni <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; Ajay Sharma
> <[email protected]>
> Subject: [EXTERNAL] [Patch v3 3/4] RDMA/mana_ib : Create adapter and Add
> error eq
>
> From: Ajay Sharma <[email protected]>
>
> Create an adapter object as a container for VF resources.
> Add the error EQ needed for adapter creation, which is later used for notifications
> from the Management SW. The management software uses this channel to send
> messages or error notifications back to the client (a condensed sketch of this
> flow follows the diffstat below).
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/device.c | 22 ++-
> drivers/infiniband/hw/mana/main.c | 95 ++++++++++++
> drivers/infiniband/hw/mana/mana_ib.h | 33 ++++
> .../net/ethernet/microsoft/mana/gdma_main.c | 146 ++++++++++--------
> drivers/net/ethernet/microsoft/mana/mana_en.c | 3 +
> include/net/mana/gdma.h | 13 +-
> 6 files changed, 242 insertions(+), 70 deletions(-)
>
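Editorial aside: a condensed sketch of the flow described above, not part of the
patch. The wrapper function name is hypothetical; the calls and fields are the ones
added by the device.c/main.c hunks below. The EQ is created first because its id is
carried in the create-adapter request, which is how the management SW learns where
to post error/notification events.

/* Hypothetical wrapper, condensed from the mana_ib_probe() changes below;
 * the existing registration steps and full error unwinding are omitted.
 */
static int mana_ib_setup_adapter(struct mana_ib_dev *mib_dev)
{
	int ret;

	/* allocates mib_dev->fatal_err_eq with mana_ib_soc_event_handler */
	ret = mana_ib_create_error_eq(mib_dev);
	if (ret)
		return ret;

	/* sends req.notify_eq_id = mib_dev->fatal_err_eq->id */
	ret = mana_ib_create_adapter(mib_dev);
	if (ret)
		mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);

	return ret;
}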
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index ea4c8c8fc10d..4077e440657a 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -68,7 +68,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n",
> mdev,
> mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);
>
> - mib_dev->gdma_dev = mdev;
> + mib_dev->gc = mdev->gdma_context;
> mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;
>
> /*
> @@ -85,15 +85,31 @@ static int mana_ib_probe(struct auxiliary_device
> *adev,
> goto free_ib_device;
> }
>
> + ret = mana_ib_create_error_eq(mib_dev);
> + if (ret) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to allocate err eq");
> + goto deregister_device;
> + }
> +
> + ret = mana_ib_create_adapter(mib_dev);
> + if (ret) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to create adapter");
> + goto free_error_eq;
> + }
> +
> ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret)
> - goto deregister_device;
> + goto destroy_adapter;
>
> dev_set_drvdata(&adev->dev, mib_dev);
>
> return 0;
>
> +destroy_adapter:
> + mana_ib_destroy_adapter(mib_dev);
> +free_error_eq:
> + mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
> deregister_device:
> mana_gd_deregister_device(&mib_dev->gc->mana_ib);
> free_ib_device:
> @@ -105,6 +121,8 @@ static void mana_ib_remove(struct auxiliary_device
> *adev) {
> struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);
>
> + mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
> + mana_ib_destroy_adapter(mib_dev);
> mana_gd_deregister_device(&mib_dev->gc->mana_ib);
> ib_unregister_device(&mib_dev->ib_dev);
> ib_dealloc_device(&mib_dev->ib_dev);
> diff --git a/drivers/infiniband/hw/mana/main.c
> b/drivers/infiniband/hw/mana/main.c
> index 2c4e3c496644..1b1a8670d0fa 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -504,3 +504,98 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32
> port, int index, void mana_ib_disassociate_ucontext(struct ib_ucontext
> *ibcontext) { }
> +
> +int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev) {
> + struct mana_ib_destroy_adapter_resp resp = {};
> + struct mana_ib_destroy_adapter_req req = {};
> + struct gdma_context *gc;
> + int err;
> +
> + gc = mib_dev->gc;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER,
> sizeof(req),
> + sizeof(resp));
> + req.adapter = mib_dev->adapter_handle;
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> +&resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to destroy adapter err
> %d", err);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +int mana_ib_create_adapter(struct mana_ib_dev *mib_dev) {
> + struct mana_ib_create_adapter_resp resp = {};
> + struct mana_ib_create_adapter_req req = {};
> + struct gdma_context *gc;
> + int err;
> +
> + gc = mib_dev->gc;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER,
> sizeof(req),
> + sizeof(resp));
> + req.notify_eq_id = mib_dev->fatal_err_eq->id;
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> +&resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to create adapter err
> %d",
> + err);
> + return err;
> + }
> +
> + mib_dev->adapter_handle = resp.adapter;
> +
> + return 0;
> +}
> +
> +static void mana_ib_soc_event_handler(void *ctx, struct gdma_queue
> *queue,
> + struct gdma_event *event)
> +{
> + struct mana_ib_dev *mib_dev = (struct mana_ib_dev *)ctx;
> +
> + switch (event->type) {
> + case GDMA_EQE_SOC_EVENT_NOTIFICATION:
> + ibdev_info(&mib_dev->ib_dev, "Received SOC Notification");
> + break;
> + case GDMA_EQE_SOC_EVENT_TEST:
> + ibdev_info(&mib_dev->ib_dev, "Received SoC Test");
> + break;
> + default:
> + ibdev_dbg(&mib_dev->ib_dev, "Received unsolicited evt %d",
> + event->type);
> + }
> +}
> +
> +int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev) {
> + struct gdma_queue_spec spec = {};
> + int err;
> +
> + spec.type = GDMA_EQ;
> + spec.monitor_avl_buf = false;
> + spec.queue_size = EQ_SIZE;
> + spec.eq.callback = mana_ib_soc_event_handler;
> + spec.eq.context = mib_dev;
> + spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
> + spec.eq.msix_allocated = true;
> + spec.eq.msix_index = 0;
> + spec.doorbell = mib_dev->gc->mana_ib.doorbell;
> + spec.pdid = mib_dev->gc->mana_ib.pdid;
> +
> + err = mana_gd_create_mana_eq(&mib_dev->gc->mana_ib, &spec,
> + &mib_dev->fatal_err_eq);
> + if (err)
> + return err;
> +
> + mib_dev->fatal_err_eq->eq.disable_needed = true;
> +
> + return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 3a2ba6b96f15..8a652bccd978 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -31,6 +31,8 @@ struct mana_ib_dev {
> struct ib_device ib_dev;
> struct gdma_dev *gdma_dev;
> struct gdma_context *gc;
> + struct gdma_queue *fatal_err_eq;
> + mana_handle_t adapter_handle;
> };
>
> struct mana_ib_wq {
> @@ -93,6 +95,31 @@ struct mana_ib_rwq_ind_table {
> struct ib_rwq_ind_table ib_ind_table;
> };
>
> +enum mana_ib_command_code {
> + MANA_IB_CREATE_ADAPTER = 0x30002,
> + MANA_IB_DESTROY_ADAPTER = 0x30003,
> +};
> +
> +struct mana_ib_create_adapter_req {
> + struct gdma_req_hdr hdr;
> + u32 notify_eq_id;
> + u32 reserved;
> +}; /*HW Data */
> +
> +struct mana_ib_create_adapter_resp {
> + struct gdma_resp_hdr hdr;
> + mana_handle_t adapter;
> +}; /* HW Data */
> +
> +struct mana_ib_destroy_adapter_req {
> + struct gdma_req_hdr hdr;
> + mana_handle_t adapter;
> +}; /*HW Data */
> +
> +struct mana_ib_destroy_adapter_resp {
> + struct gdma_resp_hdr hdr;
> +}; /* HW Data */
> +
> int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> struct ib_umem *umem,
> mana_handle_t *gdma_region);
> @@ -161,4 +188,10 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32
> port, int index,
>
> void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
>
> +int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev);
> +
> +int mana_ib_create_adapter(struct mana_ib_dev *mib_dev);
> +
> +int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
> +
> #endif
> diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> index 9fa7a2d6c2b2..55e194c9d84e 100644
> --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> @@ -185,7 +185,8 @@ void mana_gd_free_memory(struct gdma_mem_info
> *gmi) }
>
> static int mana_gd_create_hw_eq(struct gdma_context *gc,
> - struct gdma_queue *queue)
> + struct gdma_queue *queue,
> + u32 doorbell, u32 pdid)
> {
> struct gdma_create_queue_resp resp = {};
> struct gdma_create_queue_req req = {}; @@ -199,8 +200,8 @@ static
> int mana_gd_create_hw_eq(struct gdma_context *gc,
>
> req.hdr.dev_id = queue->gdma_dev->dev_id;
> req.type = queue->type;
> - req.pdid = queue->gdma_dev->pdid;
> - req.doolbell_id = queue->gdma_dev->doorbell;
> + req.pdid = pdid;
> + req.doolbell_id = doorbell;
> req.gdma_region = queue->mem_info.dma_region_handle;
> req.queue_size = queue->queue_size;
> req.log2_throttle_limit = queue->eq.log2_throttle_limit; @@ -371,53
> +372,51 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
> }
> }
>
> -static void mana_gd_process_eq_events(void *arg)
> +static void mana_gd_process_eq_events(struct list_head *eq_list)
> {
> u32 owner_bits, new_bits, old_bits;
> union gdma_eqe_info eqe_info;
> struct gdma_eqe *eq_eqe_ptr;
> - struct gdma_queue *eq = arg;
> + struct gdma_queue *eq;
> struct gdma_context *gc;
> struct gdma_eqe *eqe;
> u32 head, num_eqe;
> int i;
>
> - gc = eq->gdma_dev->gdma_context;
> -
> - num_eqe = eq->queue_size / GDMA_EQE_SIZE;
> - eq_eqe_ptr = eq->queue_mem_ptr;
> -
> - /* Process up to 5 EQEs at a time, and update the HW head. */
> - for (i = 0; i < 5; i++) {
> - eqe = &eq_eqe_ptr[eq->head % num_eqe];
> - eqe_info.as_uint32 = eqe->eqe_info;
> - owner_bits = eqe_info.owner_bits;
> -
> - old_bits = (eq->head / num_eqe - 1) &
> GDMA_EQE_OWNER_MASK;
> - /* No more entries */
> - if (owner_bits == old_bits)
> - break;
> -
> - new_bits = (eq->head / num_eqe) &
> GDMA_EQE_OWNER_MASK;
> - if (owner_bits != new_bits) {
> - dev_err(gc->dev, "EQ %d: overflow detected\n", eq-
> >id);
> - break;
> + list_for_each_entry_rcu(eq, eq_list, entry) {
> + gc = eq->gdma_dev->gdma_context;
> +
> + num_eqe = eq->queue_size / GDMA_EQE_SIZE;
> + eq_eqe_ptr = eq->queue_mem_ptr;
> + /* Process up to 5 EQEs at a time, and update the HW head. */
> + for (i = 0; i < 5; i++) {
> + eqe = &eq_eqe_ptr[eq->head % num_eqe];
> + eqe_info.as_uint32 = eqe->eqe_info;
> + owner_bits = eqe_info.owner_bits;
> +
> + old_bits = (eq->head / num_eqe - 1) &
> GDMA_EQE_OWNER_MASK;
> + /* No more entries */
> + if (owner_bits == old_bits)
> + break;
> +
> + new_bits = (eq->head / num_eqe) &
> GDMA_EQE_OWNER_MASK;
> + if (owner_bits != new_bits) {
> + dev_err(gc->dev, "EQ %d: overflow
> detected\n",
> + eq->id);
> + break;
> + }
> + /* Per GDMA spec, rmb is necessary after checking
> owner_bits, before
> + * reading eqe.
> + */
> + rmb();
> + mana_gd_process_eqe(eq);
> + eq->head++;
> }
>
> - /* Per GDMA spec, rmb is necessary after checking owner_bits,
> before
> - * reading eqe.
> - */
> - rmb();
> -
> - mana_gd_process_eqe(eq);
> -
> - eq->head++;
> + head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
> + mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq-
> >type,
> + eq->id, head, SET_ARM_BIT);
> }
> -
> - head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
> -
> - mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq-
> >id,
> - head, SET_ARM_BIT);
> }
>
> static int mana_gd_register_irq(struct gdma_queue *queue, @@ -435,44
> +434,47 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
> gc = gd->gdma_context;
> r = &gc->msix_resource;
> dev = gc->dev;
> + msi_index = spec->eq.msix_index;
>
> spin_lock_irqsave(&r->lock, flags);
>
> - msi_index = find_first_zero_bit(r->map, r->size);
> - if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
> - err = -ENOSPC;
> - } else {
> - bitmap_set(r->map, msi_index, 1);
> - queue->eq.msix_index = msi_index;
> - }
> -
> - spin_unlock_irqrestore(&r->lock, flags);
> + if (!spec->eq.msix_allocated) {
> + msi_index = find_first_zero_bit(r->map, r->size);
>
> - if (err) {
> - dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
> - err, msi_index, r->size, gc->num_msix_usable);
> + if (msi_index >= r->size ||
> + msi_index >= gc->num_msix_usable)
> + err = -ENOSPC;
> + else
> + bitmap_set(r->map, msi_index, 1);
>
> - return err;
> + if (err) {
> + dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u,
> nMSI:%u",
> + err, msi_index, r->size, gc->num_msix_usable);
> + goto out;
> + }
> }
>
> + queue->eq.msix_index = msi_index;
> gic = &gc->irq_contexts[msi_index];
>
> - WARN_ON(gic->handler || gic->arg);
> -
> - gic->arg = queue;
> + list_add_rcu(&queue->entry, &gic->eq_list);
>
> gic->handler = mana_gd_process_eq_events;
>
> - return 0;
> +out:
> + spin_unlock_irqrestore(&r->lock, flags);
> + return err;
> }
>
> -static void mana_gd_deregiser_irq(struct gdma_queue *queue)
> +static void mana_gd_deregister_irq(struct gdma_queue *queue)
> {
> struct gdma_dev *gd = queue->gdma_dev;
> struct gdma_irq_context *gic;
> struct gdma_context *gc;
> struct gdma_resource *r;
> unsigned int msix_index;
> + struct list_head *p, *n;
> + struct gdma_queue *eq;
> unsigned long flags;
>
> gc = gd->gdma_context;
> @@ -483,14 +485,23 @@ static void mana_gd_deregiser_irq(struct
> gdma_queue *queue)
> if (WARN_ON(msix_index >= gc->num_msix_usable))
> return;
>
> + spin_lock_irqsave(&r->lock, flags);
> +
> gic = &gc->irq_contexts[msix_index];
> - gic->handler = NULL;
> - gic->arg = NULL;
> + list_for_each_safe(p, n, &gic->eq_list) {
> + eq = list_entry(p, struct gdma_queue, entry);
> + if (queue == eq) {
> + list_del(&eq->entry);
> + break;
> + }
> + }
>
> - spin_lock_irqsave(&r->lock, flags);
> - bitmap_clear(r->map, msix_index, 1);
> - spin_unlock_irqrestore(&r->lock, flags);
> + if (list_empty(&gic->eq_list)) {
> + gic->handler = NULL;
> + bitmap_clear(r->map, msix_index, 1);
> + }
>
> + spin_unlock_irqrestore(&r->lock, flags);
> queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; }
>
> @@ -553,7 +564,7 @@ static void mana_gd_destroy_eq(struct gdma_context
> *gc, bool flush_evenets,
> dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
> }
>
> - mana_gd_deregiser_irq(queue);
> + mana_gd_deregister_irq(queue);
>
> if (queue->eq.disable_needed)
> mana_gd_disable_queue(queue);
> @@ -568,7 +579,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
> u32 log2_num_entries;
> int err;
>
> - queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
> + queue->eq.msix_index = spec->eq.msix_index;
>
> log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
>
> @@ -590,7 +601,8 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
> queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
>
> if (create_hwq) {
> - err = mana_gd_create_hw_eq(gc, queue);
> + err = mana_gd_create_hw_eq(gc, queue,
> + spec->doorbell, spec->pdid);
> if (err)
> goto out;
>
> @@ -800,6 +812,7 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
> kfree(queue);
> return err;
> }
> +EXPORT_SYMBOL(mana_gd_create_mana_eq);
>
> int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
> const struct gdma_queue_spec *spec, @@ -876,6
> +889,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct
> gdma_queue *queue)
> mana_gd_free_memory(gmi);
> kfree(queue);
> }
> +EXPORT_SYMBOL(mana_gd_destroy_queue);
>
> int mana_gd_verify_vf_version(struct pci_dev *pdev) { @@ -1193,7 +1207,7
> @@ static irqreturn_t mana_gd_intr(int irq, void *arg)
> struct gdma_irq_context *gic = arg;
>
> if (gic->handler)
> - gic->handler(gic->arg);
> + gic->handler(&gic->eq_list);
>
> return IRQ_HANDLED;
> }
> @@ -1246,7 +1260,7 @@ static int mana_gd_setup_irqs(struct pci_dev
> *pdev)
> for (i = 0; i < nvec; i++) {
> gic = &gc->irq_contexts[i];
> gic->handler = NULL;
> - gic->arg = NULL;
> + INIT_LIST_HEAD(&gic->eq_list);
>
> if (!i)
> snprintf(gic->name, MANA_IRQ_NAME_SZ,
> "mana_hwc@pci:%s", diff --git
> a/drivers/net/ethernet/microsoft/mana/mana_en.c
> b/drivers/net/ethernet/microsoft/mana/mana_en.c
> index a499e460594b..d2ba7de8b512 100644
> --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> @@ -1167,6 +1167,9 @@ static int mana_create_eq(struct mana_context
> *ac)
> spec.eq.callback = NULL;
> spec.eq.context = ac->eqs;
> spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
> + spec.eq.msix_allocated = false;
> + spec.doorbell = gd->doorbell;
> + spec.pdid = gd->pdid;
>
> for (i = 0; i < gc->max_num_queues; i++) {
> err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
> diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index
> e2b212dd722b..aee8e8fa1ea6 100644
> --- a/include/net/mana/gdma.h
> +++ b/include/net/mana/gdma.h
> @@ -57,6 +57,10 @@ enum gdma_eqe_type {
> GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
> GDMA_EQE_HWC_INIT_DATA = 130,
> GDMA_EQE_HWC_INIT_DONE = 131,
> +
> + /* IB NiC Events start at 176*/
> + GDMA_EQE_SOC_EVENT_NOTIFICATION = 176,
> + GDMA_EQE_SOC_EVENT_TEST,
> };
>
> enum {
> @@ -291,6 +295,7 @@ struct gdma_queue {
>
> u32 head;
> u32 tail;
> + struct list_head entry;
>
> /* Extra fields specific to EQ/CQ. */
> union {
> @@ -318,6 +323,8 @@ struct gdma_queue_spec {
> enum gdma_queue_type type;
> bool monitor_avl_buf;
> unsigned int queue_size;
> + u32 doorbell;
> + u32 pdid;
>
> /* Extra fields specific to EQ/CQ. */
> union {
> @@ -326,6 +333,8 @@ struct gdma_queue_spec {
> void *context;
>
> unsigned long log2_throttle_limit;
> + bool msix_allocated;
> + unsigned int msix_index;
> } eq;
>
> struct {
> @@ -341,8 +350,8 @@ struct gdma_queue_spec { #define
> MANA_IRQ_NAME_SZ 32
>
> struct gdma_irq_context {
> - void (*handler)(void *arg);
> - void *arg;
> + void (*handler)(struct list_head *arg);
> + struct list_head eq_list;
> char name[MANA_IRQ_NAME_SZ];
> };
>
> --
> 2.25.1


2023-07-28 03:59:53

by Ajay Sharma

[permalink] [raw]
Subject: RE: [EXTERNAL] [Patch v3 1/4] RDMA/mana_ib : Rename all mana_ib_dev type variables to mib_dev

+Long

> -----Original Message-----
> From: [email protected] <[email protected]>
> Sent: Wednesday, July 26, 2023 3:08 PM
> To: Jason Gunthorpe <[email protected]>; Leon Romanovsky <[email protected]>;
> Dexuan Cui <[email protected]>; Wei Liu <[email protected]>; David S.
> Miller <[email protected]>; Eric Dumazet <[email protected]>;
> Jakub Kicinski <[email protected]>; Paolo Abeni <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; Ajay Sharma
> <[email protected]>
> Subject: [EXTERNAL] [Patch v3 1/4] RDMA/mana_ib : Rename all mana_ib_dev
> type variables to mib_dev
>
> From: Ajay Sharma <[email protected]>
>
> This patch does not introduce any functional changes. It establishes a naming
> convention to distinguish the two device types, especially when both are used in
> the same function. Renaming all mana_ib_dev type variables to mib_dev gives a
> clean separation between the eth dev and ibdev variables.
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/cq.c | 12 ++--
> drivers/infiniband/hw/mana/device.c | 34 +++++------
> drivers/infiniband/hw/mana/main.c | 87 ++++++++++++++--------------
> drivers/infiniband/hw/mana/mana_ib.h | 9 +--
> drivers/infiniband/hw/mana/mr.c | 29 +++++-----
> drivers/infiniband/hw/mana/qp.c | 82 +++++++++++++-------------
> drivers/infiniband/hw/mana/wq.c | 21 +++----
> 7 files changed, 140 insertions(+), 134 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/cq.c
> b/drivers/infiniband/hw/mana/cq.c index d141cab8a1e6..1aed4e6360ba
> 100644
> --- a/drivers/infiniband/hw/mana/cq.c
> +++ b/drivers/infiniband/hw/mana/cq.c
> @@ -11,10 +11,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct
> ib_cq_init_attr *attr,
> struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
> struct ib_device *ibdev = ibcq->device;
> struct mana_ib_create_cq ucmd = {};
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> int err;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> if (udata->inlen < sizeof(ucmd))
> return -EINVAL;
> @@ -41,7 +41,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct
> ib_cq_init_attr *attr,
> return err;
> }
>
> - err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq-
> >gdma_region);
> + err = mana_ib_gd_create_dma_region(mib_dev, cq->umem,
> +&cq->gdma_region);
> if (err) {
> ibdev_dbg(ibdev,
> "Failed to create dma region for create cq, %d\n",
> @@ -68,11 +68,11 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct
> ib_udata *udata) {
> struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
> struct ib_device *ibdev = ibcq->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> - mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, cq->gdma_region);
> ib_umem_release(cq->umem);
>
> return 0;
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index d4541b8707e4..083f27246ba8 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -51,51 +51,51 @@ static int mana_ib_probe(struct auxiliary_device
> *adev, {
> struct mana_adev *madev = container_of(adev, struct mana_adev,
> adev);
> struct gdma_dev *mdev = madev->mdev;
> + struct mana_ib_dev *mib_dev;
> struct mana_context *mc;
> - struct mana_ib_dev *dev;
> int ret;
>
> mc = mdev->driver_data;
>
> - dev = ib_alloc_device(mana_ib_dev, ib_dev);
> - if (!dev)
> + mib_dev = ib_alloc_device(mana_ib_dev, ib_dev);
> + if (!mib_dev)
> return -ENOMEM;
>
> - ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
> + ib_set_device_ops(&mib_dev->ib_dev, &mana_ib_dev_ops);
>
> - dev->ib_dev.phys_port_cnt = mc->num_ports;
> + mib_dev->ib_dev.phys_port_cnt = mc->num_ports;
>
> - ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
> - mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
> + ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n",
> mdev,
> + mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);
>
> - dev->gdma_dev = mdev;
> - dev->ib_dev.node_type = RDMA_NODE_IB_CA;
> + mib_dev->gdma_dev = mdev;
> + mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;
>
> /*
> * num_comp_vectors needs to set to the max MSIX index
> * when interrupts and event queues are implemented
> */
> - dev->ib_dev.num_comp_vectors = 1;
> - dev->ib_dev.dev.parent = mdev->gdma_context->dev;
> + mib_dev->ib_dev.num_comp_vectors = 1;
> + mib_dev->ib_dev.dev.parent = mdev->gdma_context->dev;
>
> - ret = ib_register_device(&dev->ib_dev, "mana_%d",
> + ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret) {
> - ib_dealloc_device(&dev->ib_dev);
> + ib_dealloc_device(&mib_dev->ib_dev);
> return ret;
> }
>
> - dev_set_drvdata(&adev->dev, dev);
> + dev_set_drvdata(&adev->dev, mib_dev);
>
> return 0;
> }
>
> static void mana_ib_remove(struct auxiliary_device *adev) {
> - struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
> + struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);
>
> - ib_unregister_device(&dev->ib_dev);
> - ib_dealloc_device(&dev->ib_dev);
> + ib_unregister_device(&mib_dev->ib_dev);
> + ib_dealloc_device(&mib_dev->ib_dev);
> }
>
> static const struct auxiliary_device_id mana_id_table[] = { diff --git
> a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 7be4c3adb4e2..189e774cdab6 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -5,10 +5,10 @@
>
> #include "mana_ib.h"
>
> -void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
> +void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct mana_ib_pd
> +*pd,
> u32 port)
> {
> - struct gdma_dev *gd = dev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct net_device *ndev;
> struct mana_context *mc;
> @@ -28,10 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev,
> struct mana_ib_pd *pd,
> mutex_unlock(&pd->vport_mutex);
> }
>
> -int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd
> *pd,
> +int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port,
> + struct mana_ib_pd *pd,
> u32 doorbell_id)
> {
> - struct gdma_dev *mdev = dev->gdma_dev;
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> struct net_device *ndev;
> @@ -45,7 +46,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
>
> pd->vport_use_count++;
> if (pd->vport_use_count > 1) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Skip as this PD is already configured vport\n");
> mutex_unlock(&pd->vport_mutex);
> return 0;
> @@ -56,7 +57,8 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
> pd->vport_use_count--;
> mutex_unlock(&pd->vport_mutex);
>
> - ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n",
> err);
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to configure vPort
> %d\n",
> + err);
> return err;
> }
>
> @@ -65,7 +67,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
> pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
> pd->tx_vp_offset = mpc->tx_vp_offset;
>
> - ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id
> %x\n",
> + ibdev_dbg(&mib_dev->ib_dev, "vport handle %llx pdid %x doorbell_id
> +%x\n",
> mpc->port_handle, pd->pdn, doorbell_id);
>
> return 0;
> @@ -78,12 +80,12 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> struct gdma_create_pd_resp resp = {};
> struct gdma_create_pd_req req = {};
> enum gdma_pd_flags flags = 0;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_dev *mdev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - mdev = dev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mdev = mib_dev->gdma_dev;
>
> mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
> sizeof(resp));
> @@ -93,7 +95,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata
> *udata)
> sizeof(resp), &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get pd_id err %d status %u\n", err,
> resp.hdr.status);
> if (!err)
> @@ -104,7 +106,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
>
> pd->pd_handle = resp.pd_handle;
> pd->pdn = resp.pd_id;
> - ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
> + ibdev_dbg(&mib_dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
> pd->pd_handle, pd->pdn);
>
> mutex_init(&pd->vport_mutex);
> @@ -118,12 +120,12 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> struct ib_device *ibdev = ibpd->device;
> struct gdma_destory_pd_resp resp = {};
> struct gdma_destroy_pd_req req = {};
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_dev *mdev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - mdev = dev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mdev = mib_dev->gdma_dev;
>
> mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
> sizeof(resp));
> @@ -133,7 +135,7 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> sizeof(resp), &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to destroy pd_handle 0x%llx err %d status
> %u",
> pd->pd_handle, err, resp.hdr.status);
> if (!err)
> @@ -204,14 +206,14 @@ int mana_ib_alloc_ucontext(struct ib_ucontext
> *ibcontext,
> struct mana_ib_ucontext *ucontext =
> container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> struct gdma_dev *dev;
> int doorbell_page;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - dev = mdev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + dev = mib_dev->gdma_dev;
> gc = dev->gdma_context;
>
> /* Allocate a doorbell page index */
> @@ -233,12 +235,12 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext
> *ibcontext)
> struct mana_ib_ucontext *mana_ucontext =
> container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - gc = mdev->gdma_dev->gdma_context;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + gc = mib_dev->gdma_dev->gdma_context;
>
> ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
> if (ret)
> @@ -246,7 +248,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext
> *ibcontext) }
>
> static int
> -mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
> +mana_ib_gd_first_dma_region(struct mana_ib_dev *mib_dev,
> struct gdma_context *gc,
> struct gdma_create_dma_region_req *create_req,
> size_t num_pages, mana_handle_t *gdma_region,
> @@ -263,7 +265,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev
> *dev,
> err = mana_gd_send_request(gc, create_req_msg_size, create_req,
> sizeof(create_resp), &create_resp);
> if (err || create_resp.hdr.status != expected_status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create DMA region: %d, 0x%x\n",
> err, create_resp.hdr.status);
> if (!err)
> @@ -273,14 +275,14 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev
> *dev,
> }
>
> *gdma_region = create_resp.dma_region_handle;
> - ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
> + ibdev_dbg(&mib_dev->ib_dev, "Created DMA region handle 0x%llx\n",
> *gdma_region);
>
> return 0;
> }
>
> static int
> -mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context
> *gc,
> +mana_ib_gd_add_dma_region(struct mana_ib_dev *mib_dev, struct
> +gdma_context *gc,
> struct gdma_dma_region_add_pages_req *add_req,
> unsigned int num_pages, u32 expected_status) { @@
> -296,7 +298,7 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev,
> struct gdma_context *gc,
> err = mana_gd_send_request(gc, add_req_msg_size, add_req,
> sizeof(add_resp), &add_resp);
> if (err || add_resp.hdr.status != expected_status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create DMA region: %d, 0x%x\n",
> err, add_resp.hdr.status);
>
> @@ -309,7 +311,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev
> *dev, struct gdma_context *gc,
> return 0;
> }
>
> -int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
> +int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> + struct ib_umem *umem,
> mana_handle_t *gdma_region)
> {
> struct gdma_dma_region_add_pages_req *add_req = NULL; @@ -
> 329,14 +332,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev
> *dev, struct ib_umem *umem,
> void *request_buf;
> int err;
>
> - mdev = dev->gdma_dev;
> + mdev = mib_dev->gdma_dev;
> gc = mdev->gdma_context;
> hwc = gc->hwc.driver_data;
>
> /* Hardware requires dma region to align to chosen page size */
> page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
> if (!page_sz) {
> - ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
> + ibdev_dbg(&mib_dev->ib_dev, "failed to find page size.\n");
> return -ENOMEM;
> }
> num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
> @@ -362,13 +365,13 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
> create_req->page_count = num_pages_total;
>
> - ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total
> %lu\n",
> + ibdev_dbg(&mib_dev->ib_dev, "size_dma_region %lu
> num_pages_total
> +%lu\n",
> umem->length, num_pages_total);
>
> - ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
> + ibdev_dbg(&mib_dev->ib_dev, "page_sz %lu offset_in_page %u\n",
> page_sz, create_req->offset_in_page);
>
> - ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu,
> gdma_page_type %u",
> + ibdev_dbg(&mib_dev->ib_dev, "num_pages_to_handle %lu,
> gdma_page_type
> +%u",
> num_pages_to_handle, create_req->gdma_page_type);
>
> page_addr_list = create_req->page_addr_list; @@ -385,7 +388,7 @@
> int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
>
> if (!num_pages_processed) {
> /* First create message */
> - err = mana_ib_gd_first_dma_region(dev, gc,
> create_req,
> + err = mana_ib_gd_first_dma_region(mib_dev, gc,
> create_req,
> tail, gdma_region,
> expected_status);
> if (err)
> @@ -400,7 +403,7 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> page_addr_list = add_req->page_addr_list;
> } else {
> /* Subsequent create messages */
> - err = mana_ib_gd_add_dma_region(dev, gc, add_req,
> tail,
> + err = mana_ib_gd_add_dma_region(mib_dev, gc,
> add_req, tail,
> expected_status);
> if (err)
> break;
> @@ -417,20 +420,20 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> }
>
> if (err)
> - mana_ib_gd_destroy_dma_region(dev, *gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, *gdma_region);
>
> out:
> kfree(request_buf);
> return err;
> }
>
> -int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64
> gdma_region)
> +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev, u64
> +gdma_region)
> {
> - struct gdma_dev *mdev = dev->gdma_dev;
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_context *gc;
>
> gc = mdev->gdma_context;
> - ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n",
> gdma_region);
> + ibdev_dbg(&mib_dev->ib_dev, "destroy dma region 0x%llx\n",
> +gdma_region);
>
> return mana_gd_destroy_dma_region(gc, gdma_region); } @@ -
> 440,14 +443,14 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct
> vm_area_struct *vma)
> struct mana_ib_ucontext *mana_ucontext =
> container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> phys_addr_t pfn;
> pgprot_t prot;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - gc = mdev->gdma_dev->gdma_context;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + gc = mib_dev->gdma_dev->gdma_context;
>
> if (vma->vm_pgoff != 0) {
> ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma-
> >vm_pgoff); diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 502cc8672eef..ee4efd0af278 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -92,10 +92,11 @@ struct mana_ib_rwq_ind_table {
> struct ib_rwq_ind_table ib_ind_table;
> };
>
> -int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
> +int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> + struct ib_umem *umem,
> mana_handle_t *gdma_region);
>
> -int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
> +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev,
> mana_handle_t gdma_region);
>
> struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, @@ -129,9 +130,9 @@
> int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
>
> int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
>
> -int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
> +int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port_id,
> struct mana_ib_pd *pd, u32 doorbell_id); -void
> mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
> +void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct mana_ib_pd
> +*pd,
> u32 port);
>
> int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> diff --git a/drivers/infiniband/hw/mana/mr.c
> b/drivers/infiniband/hw/mana/mr.c index 351207c60eb6..f6a53906204d
> 100644
> --- a/drivers/infiniband/hw/mana/mr.c
> +++ b/drivers/infiniband/hw/mana/mr.c
> @@ -25,12 +25,13 @@ mana_ib_verbs_to_gdma_access_flags(int
> access_flags)
> return flags;
> }
>
> -static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct
> mana_ib_mr *mr,
> +static int mana_ib_gd_create_mr(struct mana_ib_dev *mib_dev,
> + struct mana_ib_mr *mr,
> struct gdma_create_mr_params *mr_params)
> {
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_create_mr_response resp = {};
> struct gdma_create_mr_request req = {};
> - struct gdma_dev *mdev = dev->gdma_dev;
> struct gdma_context *gc;
> int err;
>
> @@ -49,7 +50,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> break;
>
> default:
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "invalid param (GDMA_MR_TYPE) passed, type
> %d\n",
> req.mr_type);
> return -EINVAL;
> @@ -58,7 +59,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to create mr %d, %u",
> err,
> resp.hdr.status);
> if (!err)
> err = -EPROTO;
> @@ -73,11 +74,11 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> return 0;
> }
>
> -static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
> +static int mana_ib_gd_destroy_mr(struct mana_ib_dev *mib_dev, u64
> +mr_handle)
> {
> struct gdma_destroy_mr_response resp = {};
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_destroy_mr_request req = {};
> - struct gdma_dev *mdev = dev->gdma_dev;
> struct gdma_context *gc;
> int err;
>
> @@ -107,12 +108,12 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
> struct gdma_create_mr_params mr_params = {};
> struct ib_device *ibdev = ibpd->device;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct mana_ib_mr *mr;
> u64 dma_region_handle;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> ibdev_dbg(ibdev,
> "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
> @@ -133,7 +134,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> goto err_free;
> }
>
> - err = mana_ib_gd_create_dma_region(dev, mr->umem,
> &dma_region_handle);
> + err = mana_ib_gd_create_dma_region(mib_dev, mr->umem,
> +&dma_region_handle);
> if (err) {
> ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
> err);
> @@ -151,7 +152,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> mr_params.gva.access_flags =
> mana_ib_verbs_to_gdma_access_flags(access_flags);
>
> - err = mana_ib_gd_create_mr(dev, mr, &mr_params);
> + err = mana_ib_gd_create_mr(mib_dev, mr, &mr_params);
> if (err)
> goto err_dma_region;
>
> @@ -164,7 +165,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> return &mr->ibmr;
>
> err_dma_region:
> - mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
> + mana_gd_destroy_dma_region(mib_dev->gdma_dev->gdma_context,
> dma_region_handle);
>
> err_umem:
> @@ -179,12 +180,12 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct
> ib_udata *udata) {
> struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr,
> ibmr);
> struct ib_device *ibdev = ibmr->device;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> - err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
> + err = mana_ib_gd_destroy_mr(mib_dev, mr->mr_handle);
> if (err)
> return err;
>
> diff --git a/drivers/infiniband/hw/mana/qp.c
> b/drivers/infiniband/hw/mana/qp.c index 4b3b5b274e84..2e3a57123ed7
> 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -5,7 +5,7 @@
>
> #include "mana_ib.h"
>
> -static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
> +static int mana_ib_cfg_vport_steering(struct mana_ib_dev *mib_dev,
> struct net_device *ndev,
> mana_handle_t default_rxobj,
> mana_handle_t ind_table[],
> @@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> u32 req_buf_size;
> int i, err;
>
> - mdev = dev->gdma_dev;
> + mdev = mib_dev->gdma_dev;
> gc = mdev->gdma_context;
>
> req_buf_size =
> @@ -55,10 +55,10 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
> * ind_table to MANA_INDIRECT_TABLE_SIZE if required
> */
> - ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
> + ibdev_dbg(&mib_dev->ib_dev, "ind table size %u\n", 1 <<
> +log_ind_tbl_size);
> for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
> req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
> - ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
> + ibdev_dbg(&mib_dev->ib_dev, "index %u handle 0x%llx\n", i,
> req_indir_tab[i]);
> }
>
> @@ -68,7 +68,7 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> else
> netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);
>
> - ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
> + ibdev_dbg(&mib_dev->ib_dev, "vport handle %llu default_rxobj
> +0x%llx\n",
> req->vport, default_rxobj);
>
> err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp),
> &resp); @@ -97,12 +97,12 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
> struct ib_udata *udata)
> {
> struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(pd->device, struct mana_ib_dev, ib_dev);
> struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
> struct mana_ib_create_qp_rss_resp resp = {};
> struct mana_ib_create_qp_rss ucmd = {};
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> mana_handle_t *mana_ind_table;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> @@ -123,21 +123,21 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
>
> ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (ret) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed copy from udata for create rss-qp, err %d\n",
> ret);
> return ret;
> }
>
> if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_recv_wr %d exceeding limit\n",
> attr->cap.max_recv_wr);
> return -EINVAL;
> }
>
> if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_recv_sge %d exceeding limit\n",
> attr->cap.max_recv_sge);
> return -EINVAL;
> @@ -145,14 +145,14 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
>
> ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
> if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Indirect table size %d exceeding limit\n",
> ind_tbl_size);
> return -EINVAL;
> }
>
> if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "RX Hash function is not supported, %d\n",
> ucmd.rx_hash_function);
> return -EINVAL;
> @@ -161,14 +161,14 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
> /* IB ports start with 1, MANA start with 0 */
> port = ucmd.port;
> if (port < 1 || port > mc->num_ports) {
> - ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
> + ibdev_dbg(&mib_dev->ib_dev, "Invalid port %u in creating
> qp\n",
> port);
> return -EINVAL;
> }
> ndev = mc->ports[port - 1];
> mpc = netdev_priv(ndev);
>
> - ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
> + ibdev_dbg(&mib_dev->ib_dev, "rx_hash_function %d port %d\n",
> ucmd.rx_hash_function, port);
>
> mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t), @@ -
> 210,7 +210,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct
> ib_pd *pd,
> wq->id = wq_spec.queue_index;
> cq->id = cq_spec.queue_index;
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
> ret, wq->rx_object, wq->id, cq->id);
>
> @@ -221,7 +221,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp,
> struct ib_pd *pd,
> }
> resp.num_entries = i;
>
> - ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
> + ret = mana_ib_cfg_vport_steering(mib_dev, ndev, wq->rx_object,
> mana_ind_table,
> ind_tbl->log_ind_tbl_size,
> ucmd.rx_hash_key_len,
> @@ -231,7 +231,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp,
> struct ib_pd *pd,
>
> ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
> if (ret) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy to udata create rss-qp, %d\n",
> ret);
> goto fail;
> @@ -259,7 +259,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd, {
> struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
> struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(ibpd->device, struct mana_ib_dev, ib_dev);
> struct mana_ib_cq *send_cq =
> container_of(attr->send_cq, struct mana_ib_cq, ibcq); @@ -
> 267,7 +267,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct
> ib_pd *ibpd,
> rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
> ibucontext);
> struct mana_ib_create_qp_resp resp = {};
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_ib_create_qp ucmd = {};
> struct mana_obj_spec wq_spec = {};
> struct mana_obj_spec cq_spec = {};
> @@ -285,7 +285,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
>
> err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy from udata create qp-raw, %d\n", err);
> return err;
> }
> @@ -296,14 +296,14 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
> return -EINVAL;
>
> if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_send_wr %d exceeding limit\n",
> attr->cap.max_send_wr);
> return -EINVAL;
> }
>
> if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_send_sge %d exceeding limit\n",
> attr->cap.max_send_sge);
> return -EINVAL;
> @@ -311,38 +311,38 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
>
> ndev = mc->ports[port - 1];
> mpc = netdev_priv(ndev);
> - ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev,
> mpc);
> + ibdev_dbg(&mib_dev->ib_dev, "port %u ndev %p mpc %p\n", port,
> ndev,
> +mpc);
>
> - err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext-
> >doorbell);
> + err = mana_ib_cfg_vport(mib_dev, port - 1, pd,
> +mana_ucontext->doorbell);
> if (err)
> return -ENODEV;
>
> qp->port = port;
>
> - ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
> + ibdev_dbg(&mib_dev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
> ucmd.sq_buf_addr, ucmd.port);
>
> umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr,
> ucmd.sq_buf_size,
> IB_ACCESS_LOCAL_WRITE);
> if (IS_ERR(umem)) {
> err = PTR_ERR(umem);
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get umem for create qp-raw, err %d\n",
> err);
> goto err_free_vport;
> }
> qp->sq_umem = umem;
>
> - err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
> + err = mana_ib_gd_create_dma_region(mib_dev, qp->sq_umem,
> &qp->sq_gdma_region);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create dma region for create qp-raw,
> %d\n",
> err);
> goto err_release_umem;
> }
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "mana_ib_gd_create_dma_region ret %d gdma_region
> 0x%llx\n",
> err, qp->sq_gdma_region);
>
> @@ -358,7 +358,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
> err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ,
> &wq_spec,
> &cq_spec, &qp->tx_object);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create wq for create raw-qp, err %d\n",
> err);
> goto err_destroy_dma_region;
> @@ -371,7 +371,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
> qp->sq_id = wq_spec.queue_index;
> send_cq->id = cq_spec.queue_index;
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
> qp->tx_object, qp->sq_id, send_cq->id);
>
> @@ -381,7 +381,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
>
> err = ib_copy_to_udata(udata, &resp, sizeof(resp));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed copy udata for create qp-raw, %d\n",
> err);
> goto err_destroy_wq_obj;
> @@ -393,13 +393,13 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
> mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
>
> err_destroy_dma_region:
> - mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, qp->sq_gdma_region);
>
> err_release_umem:
> ib_umem_release(umem);
>
> err_free_vport:
> - mana_ib_uncfg_vport(mdev, pd, port - 1);
> + mana_ib_uncfg_vport(mib_dev, pd, port - 1);
>
> return err;
> }
> @@ -435,9 +435,9 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp
> *qp,
> struct ib_rwq_ind_table *ind_tbl,
> struct ib_udata *udata)
> {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> struct net_device *ndev;
> @@ -452,7 +452,7 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp
> *qp,
> for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
> ibwq = ind_tbl->ind_tbl[i];
> wq = container_of(ibwq, struct mana_ib_wq, ibwq);
> - ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
> + ibdev_dbg(&mib_dev->ib_dev, "destroying wq->rx_object
> %llu\n",
> wq->rx_object);
> mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
> }
> @@ -462,9 +462,9 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp
> *qp,
>
> static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata
> *udata) {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct ib_pd *ibpd = qp->ibqp.pd;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> @@ -479,11 +479,11 @@ static int mana_ib_destroy_qp_raw(struct
> mana_ib_qp *qp, struct ib_udata *udata)
> mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
>
> if (qp->sq_umem) {
> - mana_ib_gd_destroy_dma_region(mdev, qp-
> >sq_gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, qp-
> >sq_gdma_region);
> ib_umem_release(qp->sq_umem);
> }
>
> - mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
> + mana_ib_uncfg_vport(mib_dev, pd, qp->port - 1);
>
> return 0;
> }
> diff --git a/drivers/infiniband/hw/mana/wq.c
> b/drivers/infiniband/hw/mana/wq.c index 372d361510e0..56bc2b8b6690
> 100644
> --- a/drivers/infiniband/hw/mana/wq.c
> +++ b/drivers/infiniband/hw/mana/wq.c
> @@ -9,7 +9,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> struct ib_wq_init_attr *init_attr,
> struct ib_udata *udata)
> {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(pd->device, struct mana_ib_dev, ib_dev);
> struct mana_ib_create_wq ucmd = {};
> struct mana_ib_wq *wq;
> @@ -21,7 +21,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>
> err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy from udata for create wq, %d\n", err);
> return ERR_PTR(err);
> }
> @@ -30,13 +30,14 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> if (!wq)
> return ERR_PTR(-ENOMEM);
>
> - ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n",
> ucmd.wq_buf_addr);
> + ibdev_dbg(&mib_dev->ib_dev, "ucmd wq_buf_addr 0x%llx\n",
> + ucmd.wq_buf_addr);
>
> umem = ib_umem_get(pd->device, ucmd.wq_buf_addr,
> ucmd.wq_buf_size,
> IB_ACCESS_LOCAL_WRITE);
> if (IS_ERR(umem)) {
> err = PTR_ERR(umem);
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get umem for create wq, err %d\n", err);
> goto err_free_wq;
> }
> @@ -46,15 +47,15 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> wq->wq_buf_size = ucmd.wq_buf_size;
> wq->rx_object = INVALID_MANA_HANDLE;
>
> - err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq-
> >gdma_region);
> + err = mana_ib_gd_create_dma_region(mib_dev, wq->umem,
> +&wq->gdma_region);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create dma region for create wq, %d\n",
> err);
> goto err_release_umem;
> }
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "mana_ib_gd_create_dma_region ret %d gdma_region
> 0x%llx\n",
> err, wq->gdma_region);
>
> @@ -82,11 +83,11 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct
> ib_udata *udata) {
> struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq,
> ibwq);
> struct ib_device *ib_dev = ibwq->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
>
> - mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
>
> - mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, wq->gdma_region);
> ib_umem_release(wq->umem);
>
> kfree(wq);
> --
> 2.25.1


2023-07-28 04:12:01

by Ajay Sharma

[permalink] [raw]
Subject: RE: [EXTERNAL] [Patch v3 4/4] RDMA/mana_ib : Query adapter capabilities



> -----Original Message-----
> From: [email protected] <[email protected]>
> Sent: Wednesday, July 26, 2023 3:08 PM
> To: Jason Gunthorpe <[email protected]>; Leon Romanovsky <[email protected]>;
> Dexuan Cui <[email protected]>; Wei Liu <[email protected]>; David S.
> Miller <[email protected]>; Eric Dumazet <[email protected]>;
> Jakub Kicinski <[email protected]>; Paolo Abeni <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; Ajay Sharma
> <[email protected]>
> Subject: [EXTERNAL] [Patch v3 4/4] RDMA/mana_ib : Query adapter
> capabilities
>
> From: Ajay Sharma <[email protected]>
>
> Query the adapter capabilities and expose them to other clients and the VF.
> User-supplied values are checked against these capabilities to protect
> against overflows.
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/device.c | 4 ++
> drivers/infiniband/hw/mana/main.c | 66 +++++++++++++++++++++++++---
> drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
> 3 files changed, 115 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index 4077e440657a..e15da43c73a0 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> goto free_error_eq;
> }
>
> + ret = mana_ib_query_adapter_caps(mib_dev);
> + if (ret)
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use
> defaults");
> +
> ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret)
> diff --git a/drivers/infiniband/hw/mana/main.c
> b/drivers/infiniband/hw/mana/main.c
> index 1b1a8670d0fa..512815e1e64d 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -469,21 +469,27 @@ int mana_ib_get_port_immutable(struct ib_device
> *ibdev, u32 port_num, int mana_ib_query_device(struct ib_device *ibdev,
> struct ib_device_attr *props,
> struct ib_udata *uhw)
> {
> + struct mana_ib_dev *mib_dev = container_of(ibdev,
> + struct mana_ib_dev, ib_dev);
> +
> props->max_qp = MANA_MAX_NUM_QUEUES;
> props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> - /*
> - * max_cqe could be potentially much bigger.
> - * As this version of driver only support RAW QP, set it to the same
> - * value as max_qp_wr
> - */
> props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> props->max_mr_size = MANA_IB_MAX_MR_SIZE;
> props->max_mr = MANA_IB_MAX_MR;
> props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
> props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
>
> + /* If the Management SW is updated and supports adapter creation */
> + if (mib_dev->adapter_handle) {
> + props->max_qp = mib_dev->adapter_caps.max_qp_count;
> + props->max_qp_wr = mib_dev-
> >adapter_caps.max_requester_sq_size;
> + props->max_cqe = mib_dev-
> >adapter_caps.max_requester_sq_size;
> + props->max_mr = mib_dev->adapter_caps.max_mr_count;
> + props->max_send_sge = mib_dev-
> >adapter_caps.max_send_wqe_size;
> + props->max_recv_sge = mib_dev-
> >adapter_caps.max_recv_wqe_size;
> + }
> +
> return 0;
> }
>
> @@ -599,3 +605,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev
> *mib_dev)
>
> return 0;
> }
> +
> +static void assign_caps(struct mana_ib_adapter_caps *caps,
> + struct mana_ib_query_adapter_caps_resp *resp) {
> + caps->max_sq_id = resp->max_sq_id;
> + caps->max_rq_id = resp->max_rq_id;
> + caps->max_cq_id = resp->max_cq_id;
> + caps->max_qp_count = resp->max_qp_count;
> + caps->max_cq_count = resp->max_cq_count;
> + caps->max_mr_count = resp->max_mr_count;
> + caps->max_pd_count = resp->max_pd_count;
> + caps->max_inbound_read_limit = resp->max_inbound_read_limit;
> + caps->max_outbound_read_limit = resp->max_outbound_read_limit;
> + caps->mw_count = resp->mw_count;
> + caps->max_srq_count = resp->max_srq_count;
> + caps->max_requester_sq_size = resp->max_requester_sq_size;
> + caps->max_responder_sq_size = resp->max_responder_sq_size;
> + caps->max_requester_rq_size = resp->max_requester_rq_size;
> + caps->max_responder_rq_size = resp->max_responder_rq_size;
> + caps->max_send_wqe_size = resp->max_send_wqe_size;
> + caps->max_recv_wqe_size = resp->max_recv_wqe_size;
> + caps->max_inline_data_size = resp->max_inline_data_size; }
> +
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev) {
> + struct mana_ib_query_adapter_caps_resp resp = {};
> + struct mana_ib_query_adapter_caps_req req = {};
> + int err;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP,
> sizeof(req),
> + sizeof(resp));
> + req.hdr.resp.msg_version =
> MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3;
> + req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
> + sizeof(resp), &resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps
> err %d", err);
> + return err;
> + }
> +
> + assign_caps(&mib_dev->adapter_caps, &resp);
> + return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 8a652bccd978..1044358230d3 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -20,19 +20,41 @@
>
> /* MANA doesn't have any limit for MR size */
> #define MANA_IB_MAX_MR_SIZE U64_MAX
> -
> +#define MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3 3
> /*
> * The hardware limit of number of MRs is greater than maximum number of
> MRs
> * that can possibly represent in 24 bits
> */
> #define MANA_IB_MAX_MR 0xFFFFFFu
>
> +struct mana_ib_adapter_caps {
> + u32 max_sq_id;
> + u32 max_rq_id;
> + u32 max_cq_id;
> + u32 max_qp_count;
> + u32 max_cq_count;
> + u32 max_mr_count;
> + u32 max_pd_count;
> + u32 max_inbound_read_limit;
> + u32 max_outbound_read_limit;
> + u32 mw_count;
> + u32 max_srq_count;
> + u32 max_requester_sq_size;
> + u32 max_responder_sq_size;
> + u32 max_requester_rq_size;
> + u32 max_responder_rq_size;
> + u32 max_send_wqe_size;
> + u32 max_recv_wqe_size;
> + u32 max_inline_data_size;
> +};
> +
> struct mana_ib_dev {
> struct ib_device ib_dev;
> struct gdma_dev *gdma_dev;
> struct gdma_context *gc;
> struct gdma_queue *fatal_err_eq;
> mana_handle_t adapter_handle;
> + struct mana_ib_adapter_caps adapter_caps;
> };
>
> struct mana_ib_wq {
> @@ -96,6 +118,7 @@ struct mana_ib_rwq_ind_table { };
>
> enum mana_ib_command_code {
> + MANA_IB_GET_ADAPTER_CAP = 0x30001,
> MANA_IB_CREATE_ADAPTER = 0x30002,
> MANA_IB_DESTROY_ADAPTER = 0x30003,
> };
> @@ -120,6 +143,32 @@ struct mana_ib_destroy_adapter_resp {
> struct gdma_resp_hdr hdr;
> }; /* HW Data */
>
> +struct mana_ib_query_adapter_caps_req {
> + struct gdma_req_hdr hdr;
> +}; /*HW Data */
> +
> +struct mana_ib_query_adapter_caps_resp {
> + struct gdma_resp_hdr hdr;
> + u32 max_sq_id;
> + u32 max_rq_id;
> + u32 max_cq_id;
> + u32 max_qp_count;
> + u32 max_cq_count;
> + u32 max_mr_count;
> + u32 max_pd_count;
> + u32 max_inbound_read_limit;
> + u32 max_outbound_read_limit;
> + u32 mw_count;
> + u32 max_srq_count;
> + u32 max_requester_sq_size;
> + u32 max_responder_sq_size;
> + u32 max_requester_rq_size;
> + u32 max_responder_rq_size;
> + u32 max_send_wqe_size;
> + u32 max_recv_wqe_size;
> + u32 max_inline_data_size;
> +}; /* HW Data */
> +
> int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> struct ib_umem *umem,
> mana_handle_t *gdma_region);
> @@ -194,4 +243,6 @@ int mana_ib_create_adapter(struct mana_ib_dev
> *mib_dev);
>
> int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
>
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev);
> +
> #endif
> --
> 2.25.1


2023-07-28 22:45:37

by Long Li

[permalink] [raw]
Subject: RE: [Patch v3 3/4] RDMA/mana_ib : Create adapter and Add error eq

> Subject: [Patch v3 3/4] RDMA/mana_ib : Create adapter and Add error eq
>
> From: Ajay Sharma <[email protected]>
>
> Create an adapter object as a container for VF resources.
> Add the error EQ needed for adapter creation, later used for notifications
> from the management SW. The management software uses this channel to send
> messages or error notifications back to the client.
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/device.c | 22 ++-
> drivers/infiniband/hw/mana/main.c | 95 ++++++++++++
> drivers/infiniband/hw/mana/mana_ib.h | 33 ++++
> .../net/ethernet/microsoft/mana/gdma_main.c | 146 ++++++++++--------
> drivers/net/ethernet/microsoft/mana/mana_en.c | 3 +
> include/net/mana/gdma.h | 13 +-
> 6 files changed, 242 insertions(+), 70 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index ea4c8c8fc10d..4077e440657a 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -68,7 +68,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n",
> mdev,
> mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);
>
> - mib_dev->gdma_dev = mdev;
> + mib_dev->gc = mdev->gdma_context;
> mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;
>
> /*
> @@ -85,15 +85,31 @@ static int mana_ib_probe(struct auxiliary_device
> *adev,
> goto free_ib_device;
> }
>
> + ret = mana_ib_create_error_eq(mib_dev);
> + if (ret) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to allocate err eq");
> + goto deregister_device;
> + }
> +
> + ret = mana_ib_create_adapter(mib_dev);
> + if (ret) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to create adapter");
> + goto free_error_eq;
> + }
> +
> ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret)
> - goto deregister_device;
> + goto destroy_adapter;
>
> dev_set_drvdata(&adev->dev, mib_dev);
>
> return 0;
>
> +destroy_adapter:
> + mana_ib_destroy_adapter(mib_dev);
> +free_error_eq:
> + mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
> deregister_device:
> mana_gd_deregister_device(&mib_dev->gc->mana_ib);
> free_ib_device:
> @@ -105,6 +121,8 @@ static void mana_ib_remove(struct auxiliary_device
> *adev) {
> struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);
>
> + mana_gd_destroy_queue(mib_dev->gc, mib_dev->fatal_err_eq);
> + mana_ib_destroy_adapter(mib_dev);
> mana_gd_deregister_device(&mib_dev->gc->mana_ib);
> ib_unregister_device(&mib_dev->ib_dev);
> ib_dealloc_device(&mib_dev->ib_dev);
> diff --git a/drivers/infiniband/hw/mana/main.c
> b/drivers/infiniband/hw/mana/main.c
> index 2c4e3c496644..1b1a8670d0fa 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -504,3 +504,98 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32
> port, int index, void mana_ib_disassociate_ucontext(struct ib_ucontext
> *ibcontext) { }
> +
> +int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev) {
> + struct mana_ib_destroy_adapter_resp resp = {};
> + struct mana_ib_destroy_adapter_req req = {};
> + struct gdma_context *gc;
> + int err;
> +
> + gc = mib_dev->gc;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER,
> sizeof(req),
> + sizeof(resp));
> + req.adapter = mib_dev->adapter_handle;
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> +&resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to destroy adapter
> err %d", err);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +int mana_ib_create_adapter(struct mana_ib_dev *mib_dev) {
> + struct mana_ib_create_adapter_resp resp = {};
> + struct mana_ib_create_adapter_req req = {};
> + struct gdma_context *gc;
> + int err;
> +
> + gc = mib_dev->gc;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER,
> sizeof(req),
> + sizeof(resp));
> + req.notify_eq_id = mib_dev->fatal_err_eq->id;
> + req.hdr.dev_id = gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> +&resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to create adapter
> err %d",
> + err);
> + return err;
> + }
> +
> + mib_dev->adapter_handle = resp.adapter;
> +
> + return 0;
> +}
> +
> +static void mana_ib_soc_event_handler(void *ctx, struct gdma_queue
> *queue,
> + struct gdma_event *event)
> +{
> + struct mana_ib_dev *mib_dev = (struct mana_ib_dev *)ctx;
> +
> + switch (event->type) {
> + case GDMA_EQE_SOC_EVENT_NOTIFICATION:
> + ibdev_info(&mib_dev->ib_dev, "Received SOC Notification");
> + break;

Should we do something with the event?
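If GDMA_EQE_SOC_EVENT_NOTIFICATION really signals a fatal device condition, one option could be to forward it to ULPs through the standard verbs event mechanism instead of only logging it. Untested sketch, and the event mapping below is only a guess:

	case GDMA_EQE_SOC_EVENT_NOTIFICATION: {
		/* Sketch: report the notification to ULPs rather than just
		 * logging it. Whether IB_EVENT_DEVICE_FATAL is the right
		 * mapping is an assumption.
		 */
		struct ib_event ibev = {};

		ibev.device = &mib_dev->ib_dev;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		ib_dispatch_event(&ibev);

		ibdev_info(&mib_dev->ib_dev, "Received SOC Notification");
		break;
	}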




> + case GDMA_EQE_SOC_EVENT_TEST:
> + ibdev_info(&mib_dev->ib_dev, "Received SoC Test");
> + break;
> + default:
> + ibdev_dbg(&mib_dev->ib_dev, "Received unsolicited evt %d",
> + event->type);
> + }
> +}
> +
> +int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev) {
> + struct gdma_queue_spec spec = {};
> + int err;
> +
> + spec.type = GDMA_EQ;
> + spec.monitor_avl_buf = false;
> + spec.queue_size = EQ_SIZE;
> + spec.eq.callback = mana_ib_soc_event_handler;
> + spec.eq.context = mib_dev;
> + spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
> + spec.eq.msix_allocated = true;
> + spec.eq.msix_index = 0;
> + spec.doorbell = mib_dev->gc->mana_ib.doorbell;
> + spec.pdid = mib_dev->gc->mana_ib.pdid;
> +
> + err = mana_gd_create_mana_eq(&mib_dev->gc->mana_ib, &spec,
> + &mib_dev->fatal_err_eq);
> + if (err)
> + return err;
> +
> + mib_dev->fatal_err_eq->eq.disable_needed = true;
> +
> + return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 3a2ba6b96f15..8a652bccd978 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -31,6 +31,8 @@ struct mana_ib_dev {
> struct ib_device ib_dev;
> struct gdma_dev *gdma_dev;
> struct gdma_context *gc;
> + struct gdma_queue *fatal_err_eq;
> + mana_handle_t adapter_handle;
> };
>
> struct mana_ib_wq {
> @@ -93,6 +95,31 @@ struct mana_ib_rwq_ind_table {
> struct ib_rwq_ind_table ib_ind_table;
> };
>
> +enum mana_ib_command_code {
> + MANA_IB_CREATE_ADAPTER = 0x30002,
> + MANA_IB_DESTROY_ADAPTER = 0x30003,
> +};
> +
> +struct mana_ib_create_adapter_req {
> + struct gdma_req_hdr hdr;
> + u32 notify_eq_id;
> + u32 reserved;
> +}; /*HW Data */
> +
> +struct mana_ib_create_adapter_resp {
> + struct gdma_resp_hdr hdr;
> + mana_handle_t adapter;
> +}; /* HW Data */
> +
> +struct mana_ib_destroy_adapter_req {
> + struct gdma_req_hdr hdr;
> + mana_handle_t adapter;
> +}; /*HW Data */
> +
> +struct mana_ib_destroy_adapter_resp {
> + struct gdma_resp_hdr hdr;
> +}; /* HW Data */
> +
> int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> struct ib_umem *umem,
> mana_handle_t *gdma_region);
> @@ -161,4 +188,10 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32
> port, int index,
>
> void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
>
> +int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev);
> +
> +int mana_ib_create_adapter(struct mana_ib_dev *mib_dev);
> +
> +int mana_ib_destroy_adapter(struct mana_ib_dev *mib_dev);
> +
> #endif
> diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> index 9fa7a2d6c2b2..55e194c9d84e 100644
> --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
> +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
> @@ -185,7 +185,8 @@ void mana_gd_free_memory(struct gdma_mem_info
> *gmi) }
>
> static int mana_gd_create_hw_eq(struct gdma_context *gc,
> - struct gdma_queue *queue)
> + struct gdma_queue *queue,
> + u32 doorbell, u32 pdid)
> {
> struct gdma_create_queue_resp resp = {};
> struct gdma_create_queue_req req = {}; @@ -199,8 +200,8 @@ static
> int mana_gd_create_hw_eq(struct gdma_context *gc,
>
> req.hdr.dev_id = queue->gdma_dev->dev_id;
> req.type = queue->type;
> - req.pdid = queue->gdma_dev->pdid;
> - req.doolbell_id = queue->gdma_dev->doorbell;
> + req.pdid = pdid;
> + req.doolbell_id = doorbell;
> req.gdma_region = queue->mem_info.dma_region_handle;
> req.queue_size = queue->queue_size;
> req.log2_throttle_limit = queue->eq.log2_throttle_limit; @@ -371,53
> +372,51 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
> }
> }
>
> -static void mana_gd_process_eq_events(void *arg)
> +static void mana_gd_process_eq_events(struct list_head *eq_list)
> {
> u32 owner_bits, new_bits, old_bits;
> union gdma_eqe_info eqe_info;
> struct gdma_eqe *eq_eqe_ptr;
> - struct gdma_queue *eq = arg;
> + struct gdma_queue *eq;
> struct gdma_context *gc;
> struct gdma_eqe *eqe;
> u32 head, num_eqe;
> int i;
>
> - gc = eq->gdma_dev->gdma_context;
> -
> - num_eqe = eq->queue_size / GDMA_EQE_SIZE;
> - eq_eqe_ptr = eq->queue_mem_ptr;
> -
> - /* Process up to 5 EQEs at a time, and update the HW head. */
> - for (i = 0; i < 5; i++) {
> - eqe = &eq_eqe_ptr[eq->head % num_eqe];
> - eqe_info.as_uint32 = eqe->eqe_info;
> - owner_bits = eqe_info.owner_bits;
> -
> - old_bits = (eq->head / num_eqe - 1) &
> GDMA_EQE_OWNER_MASK;
> - /* No more entries */
> - if (owner_bits == old_bits)
> - break;
> -
> - new_bits = (eq->head / num_eqe) &
> GDMA_EQE_OWNER_MASK;
> - if (owner_bits != new_bits) {
> - dev_err(gc->dev, "EQ %d: overflow detected\n", eq-
> >id);
> - break;
> + list_for_each_entry_rcu(eq, eq_list, entry) {
> + gc = eq->gdma_dev->gdma_context;
> +
> + num_eqe = eq->queue_size / GDMA_EQE_SIZE;
> + eq_eqe_ptr = eq->queue_mem_ptr;
> + /* Process up to 5 EQEs at a time, and update the HW head. */
> + for (i = 0; i < 5; i++) {
> + eqe = &eq_eqe_ptr[eq->head % num_eqe];
> + eqe_info.as_uint32 = eqe->eqe_info;
> + owner_bits = eqe_info.owner_bits;
> +
> + old_bits = (eq->head / num_eqe - 1) &
> GDMA_EQE_OWNER_MASK;
> + /* No more entries */
> + if (owner_bits == old_bits)
> + break;
> +
> + new_bits = (eq->head / num_eqe) &
> GDMA_EQE_OWNER_MASK;
> + if (owner_bits != new_bits) {
> + dev_err(gc->dev, "EQ %d: overflow
> detected\n",
> + eq->id);
> + break;
> + }
> + /* Per GDMA spec, rmb is necessary after checking
> owner_bits, before
> + * reading eqe.
> + */
> + rmb();
> + mana_gd_process_eqe(eq);
> + eq->head++;
> }
>
> - /* Per GDMA spec, rmb is necessary after checking
> owner_bits, before
> - * reading eqe.
> - */
> - rmb();
> -
> - mana_gd_process_eqe(eq);
> -
> - eq->head++;
> + head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
> + mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq-
> >type,
> + eq->id, head, SET_ARM_BIT);
> }
> -
> - head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
> -
> - mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq-
> >id,
> - head, SET_ARM_BIT);
> }
>
> static int mana_gd_register_irq(struct gdma_queue *queue, @@ -435,44
> +434,47 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
> gc = gd->gdma_context;
> r = &gc->msix_resource;
> dev = gc->dev;
> + msi_index = spec->eq.msix_index;
>
> spin_lock_irqsave(&r->lock, flags);
>
> - msi_index = find_first_zero_bit(r->map, r->size);
> - if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
> - err = -ENOSPC;
> - } else {
> - bitmap_set(r->map, msi_index, 1);
> - queue->eq.msix_index = msi_index;
> - }
> -
> - spin_unlock_irqrestore(&r->lock, flags);
> + if (!spec->eq.msix_allocated) {
> + msi_index = find_first_zero_bit(r->map, r->size);
>
> - if (err) {
> - dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u,
> nMSI:%u",
> - err, msi_index, r->size, gc->num_msix_usable);
> + if (msi_index >= r->size ||
> + msi_index >= gc->num_msix_usable)
> + err = -ENOSPC;
> + else
> + bitmap_set(r->map, msi_index, 1);
>
> - return err;
> + if (err) {
> + dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u,
> nMSI:%u",
> + err, msi_index, r->size, gc->num_msix_usable);
> + goto out;
> + }
> }
>
> + queue->eq.msix_index = msi_index;
> gic = &gc->irq_contexts[msi_index];
>
> - WARN_ON(gic->handler || gic->arg);
> -
> - gic->arg = queue;
> + list_add_rcu(&queue->entry, &gic->eq_list);
>
> gic->handler = mana_gd_process_eq_events;
>
> - return 0;
> +out:
> + spin_unlock_irqrestore(&r->lock, flags);
> + return err;
> }
>
> -static void mana_gd_deregiser_irq(struct gdma_queue *queue)
> +static void mana_gd_deregister_irq(struct gdma_queue *queue)
> {
> struct gdma_dev *gd = queue->gdma_dev;
> struct gdma_irq_context *gic;
> struct gdma_context *gc;
> struct gdma_resource *r;
> unsigned int msix_index;
> + struct list_head *p, *n;
> + struct gdma_queue *eq;
> unsigned long flags;
>
> gc = gd->gdma_context;
> @@ -483,14 +485,23 @@ static void mana_gd_deregiser_irq(struct
> gdma_queue *queue)
> if (WARN_ON(msix_index >= gc->num_msix_usable))
> return;
>
> + spin_lock_irqsave(&r->lock, flags);
> +
> gic = &gc->irq_contexts[msix_index];
> - gic->handler = NULL;
> - gic->arg = NULL;
> + list_for_each_safe(p, n, &gic->eq_list) {
> + eq = list_entry(p, struct gdma_queue, entry);
> + if (queue == eq) {
> + list_del(&eq->entry);

The previous code used list_for_each_entry_rcu() for iterating the eq list; does this list_del() need to be followed by synchronize_rcu()?
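E.g. something like this (untested sketch, assuming readers only walk eq_list under rcu_read_lock()):

	list_for_each_safe(p, n, &gic->eq_list) {
		eq = list_entry(p, struct gdma_queue, entry);
		if (queue == eq) {
			/* Readers iterate eq_list with
			 * list_for_each_entry_rcu(), so unlink with the
			 * RCU variant ...
			 */
			list_del_rcu(&eq->entry);
			break;
		}
	}

	spin_unlock_irqrestore(&r->lock, flags);

	/* ... and wait for in-flight readers before the queue memory can
	 * be freed. This has to happen outside the spinlock.
	 */
	synchronize_rcu();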




> + break;
> + }
> + }
>

2023-07-28 22:53:11

by Long Li

[permalink] [raw]
Subject: RE: [Patch v3 1/4] RDMA/mana_ib : Rename all mana_ib_dev type variables to mib_dev



> -----Original Message-----
> From: [email protected] <[email protected]>
> Sent: Wednesday, July 26, 2023 1:08 PM
> To: Jason Gunthorpe <[email protected]>; Leon Romanovsky <[email protected]>;
> Dexuan Cui <[email protected]>; Wei Liu <[email protected]>; David S.
> Miller <[email protected]>; Eric Dumazet <[email protected]>;
> Jakub Kicinski <[email protected]>; Paolo Abeni <[email protected]>
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; Ajay Sharma
> <[email protected]>
> Subject: [Patch v3 1/4] RDMA/mana_ib : Rename all mana_ib_dev type
> variables to mib_dev
>
> From: Ajay Sharma <[email protected]>
>
> This patch does not introduce any functional changes. It creates naming
> convention to distinguish especially when used in the same
> function.Renaming all mana_ib_dev type variables to mib_dev to have clean
> separation between eth dev and ibdev variables.
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/cq.c | 12 ++--
> drivers/infiniband/hw/mana/device.c | 34 +++++------
> drivers/infiniband/hw/mana/main.c | 87 ++++++++++++++--------------
> drivers/infiniband/hw/mana/mana_ib.h | 9 +--
> drivers/infiniband/hw/mana/mr.c | 29 +++++-----
> drivers/infiniband/hw/mana/qp.c | 82 +++++++++++++-------------
> drivers/infiniband/hw/mana/wq.c | 21 +++----
> 7 files changed, 140 insertions(+), 134 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/cq.c
> b/drivers/infiniband/hw/mana/cq.c index d141cab8a1e6..1aed4e6360ba
> 100644
> --- a/drivers/infiniband/hw/mana/cq.c
> +++ b/drivers/infiniband/hw/mana/cq.c
> @@ -11,10 +11,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const
> struct ib_cq_init_attr *attr,
> struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
> struct ib_device *ibdev = ibcq->device;
> struct mana_ib_create_cq ucmd = {};
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> int err;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> if (udata->inlen < sizeof(ucmd))
> return -EINVAL;
> @@ -41,7 +41,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct
> ib_cq_init_attr *attr,
> return err;
> }
>
> - err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq-
> >gdma_region);
> + err = mana_ib_gd_create_dma_region(mib_dev, cq->umem,
> +&cq->gdma_region);
> if (err) {
> ibdev_dbg(ibdev,
> "Failed to create dma region for create cq, %d\n",
> @@ -68,11 +68,11 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct
> ib_udata *udata) {
> struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
> struct ib_device *ibdev = ibcq->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> - mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, cq->gdma_region);
> ib_umem_release(cq->umem);
>
> return 0;
> diff --git a/drivers/infiniband/hw/mana/device.c
> b/drivers/infiniband/hw/mana/device.c
> index d4541b8707e4..083f27246ba8 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -51,51 +51,51 @@ static int mana_ib_probe(struct auxiliary_device
> *adev, {
> struct mana_adev *madev = container_of(adev, struct mana_adev,
> adev);
> struct gdma_dev *mdev = madev->mdev;
> + struct mana_ib_dev *mib_dev;
> struct mana_context *mc;
> - struct mana_ib_dev *dev;
> int ret;
>
> mc = mdev->driver_data;
>
> - dev = ib_alloc_device(mana_ib_dev, ib_dev);
> - if (!dev)
> + mib_dev = ib_alloc_device(mana_ib_dev, ib_dev);
> + if (!mib_dev)
> return -ENOMEM;
>
> - ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
> + ib_set_device_ops(&mib_dev->ib_dev, &mana_ib_dev_ops);
>
> - dev->ib_dev.phys_port_cnt = mc->num_ports;
> + mib_dev->ib_dev.phys_port_cnt = mc->num_ports;
>
> - ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n",
> mdev,
> - mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
> + ibdev_dbg(&mib_dev->ib_dev, "mdev=%p id=%d num_ports=%d\n",
> mdev,
> + mdev->dev_id.as_uint32, mib_dev->ib_dev.phys_port_cnt);
>
> - dev->gdma_dev = mdev;
> - dev->ib_dev.node_type = RDMA_NODE_IB_CA;
> + mib_dev->gdma_dev = mdev;
> + mib_dev->ib_dev.node_type = RDMA_NODE_IB_CA;
>
> /*
> * num_comp_vectors needs to set to the max MSIX index
> * when interrupts and event queues are implemented
> */
> - dev->ib_dev.num_comp_vectors = 1;
> - dev->ib_dev.dev.parent = mdev->gdma_context->dev;
> + mib_dev->ib_dev.num_comp_vectors = 1;
> + mib_dev->ib_dev.dev.parent = mdev->gdma_context->dev;
>
> - ret = ib_register_device(&dev->ib_dev, "mana_%d",
> + ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret) {
> - ib_dealloc_device(&dev->ib_dev);
> + ib_dealloc_device(&mib_dev->ib_dev);
> return ret;
> }
>
> - dev_set_drvdata(&adev->dev, dev);
> + dev_set_drvdata(&adev->dev, mib_dev);
>
> return 0;
> }
>
> static void mana_ib_remove(struct auxiliary_device *adev) {
> - struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
> + struct mana_ib_dev *mib_dev = dev_get_drvdata(&adev->dev);
>
> - ib_unregister_device(&dev->ib_dev);
> - ib_dealloc_device(&dev->ib_dev);
> + ib_unregister_device(&mib_dev->ib_dev);
> + ib_dealloc_device(&mib_dev->ib_dev);
> }
>
> static const struct auxiliary_device_id mana_id_table[] = { diff --git
> a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 7be4c3adb4e2..189e774cdab6 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -5,10 +5,10 @@
>
> #include "mana_ib.h"
>
> -void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd
> *pd,
> +void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct
> mana_ib_pd
> +*pd,
> u32 port)
> {
> - struct gdma_dev *gd = dev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct net_device *ndev;
> struct mana_context *mc;
> @@ -28,10 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev,
> struct mana_ib_pd *pd,
> mutex_unlock(&pd->vport_mutex);
> }
>
> -int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct
> mana_ib_pd *pd,
> +int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port,
> + struct mana_ib_pd *pd,
> u32 doorbell_id)
> {
> - struct gdma_dev *mdev = dev->gdma_dev;
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> struct net_device *ndev;
> @@ -45,7 +46,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
>
> pd->vport_use_count++;
> if (pd->vport_use_count > 1) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Skip as this PD is already configured vport\n");
> mutex_unlock(&pd->vport_mutex);
> return 0;
> @@ -56,7 +57,8 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
> pd->vport_use_count--;
> mutex_unlock(&pd->vport_mutex);
>
> - ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n",
> err);
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to configure
> vPort %d\n",
> + err);
> return err;
> }
>
> @@ -65,7 +67,7 @@ int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32
> port, struct mana_ib_pd *pd,
> pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
> pd->tx_vp_offset = mpc->tx_vp_offset;
>
> - ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x
> doorbell_id %x\n",
> + ibdev_dbg(&mib_dev->ib_dev, "vport handle %llx pdid %x doorbell_id
> +%x\n",
> mpc->port_handle, pd->pdn, doorbell_id);
>
> return 0;
> @@ -78,12 +80,12 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> struct gdma_create_pd_resp resp = {};
> struct gdma_create_pd_req req = {};
> enum gdma_pd_flags flags = 0;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_dev *mdev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - mdev = dev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mdev = mib_dev->gdma_dev;
>
> mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
> sizeof(resp));
> @@ -93,7 +95,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> sizeof(resp), &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get pd_id err %d status %u\n", err,
> resp.hdr.status);
> if (!err)
> @@ -104,7 +106,7 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
>
> pd->pd_handle = resp.pd_handle;
> pd->pdn = resp.pd_id;
> - ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
> + ibdev_dbg(&mib_dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
> pd->pd_handle, pd->pdn);
>
> mutex_init(&pd->vport_mutex);
> @@ -118,12 +120,12 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> struct ib_device *ibdev = ibpd->device;
> struct gdma_destory_pd_resp resp = {};
> struct gdma_destroy_pd_req req = {};
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_dev *mdev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - mdev = dev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mdev = mib_dev->gdma_dev;
>
> mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
> sizeof(resp));
> @@ -133,7 +135,7 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct
> ib_udata *udata)
> sizeof(resp), &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to destroy pd_handle 0x%llx err %d
> status %u",
> pd->pd_handle, err, resp.hdr.status);
> if (!err)
> @@ -204,14 +206,14 @@ int mana_ib_alloc_ucontext(struct ib_ucontext
> *ibcontext,
> struct mana_ib_ucontext *ucontext =
> container_of(ibcontext, struct mana_ib_ucontext,
> ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> struct gdma_dev *dev;
> int doorbell_page;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - dev = mdev->gdma_dev;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + dev = mib_dev->gdma_dev;
> gc = dev->gdma_context;
>
> /* Allocate a doorbell page index */
> @@ -233,12 +235,12 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext
> *ibcontext)
> struct mana_ib_ucontext *mana_ucontext =
> container_of(ibcontext, struct mana_ib_ucontext,
> ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - gc = mdev->gdma_dev->gdma_context;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + gc = mib_dev->gdma_dev->gdma_context;
>
> ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext-
> >doorbell);
> if (ret)
> @@ -246,7 +248,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext
> *ibcontext) }
>
> static int
> -mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
> +mana_ib_gd_first_dma_region(struct mana_ib_dev *mib_dev,
> struct gdma_context *gc,
> struct gdma_create_dma_region_req *create_req,
> size_t num_pages, mana_handle_t *gdma_region,
> @@ -263,7 +265,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev
> *dev,
> err = mana_gd_send_request(gc, create_req_msg_size, create_req,
> sizeof(create_resp), &create_resp);
> if (err || create_resp.hdr.status != expected_status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create DMA region: %d, 0x%x\n",
> err, create_resp.hdr.status);
> if (!err)
> @@ -273,14 +275,14 @@ mana_ib_gd_first_dma_region(struct
> mana_ib_dev *dev,
> }
>
> *gdma_region = create_resp.dma_region_handle;
> - ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
> + ibdev_dbg(&mib_dev->ib_dev, "Created DMA region handle
> 0x%llx\n",
> *gdma_region);
>
> return 0;
> }
>
> static int
> -mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct
> gdma_context *gc,
> +mana_ib_gd_add_dma_region(struct mana_ib_dev *mib_dev, struct
> +gdma_context *gc,
> struct gdma_dma_region_add_pages_req *add_req,
> unsigned int num_pages, u32 expected_status)
> { @@ -296,7 +298,7 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev
> *dev, struct gdma_context *gc,
> err = mana_gd_send_request(gc, add_req_msg_size, add_req,
> sizeof(add_resp), &add_resp);
> if (err || add_resp.hdr.status != expected_status) {
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create DMA region: %d, 0x%x\n",
> err, add_resp.hdr.status);
>
> @@ -309,7 +311,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev
> *dev, struct gdma_context *gc,
> return 0;
> }
>
> -int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
> +int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> + struct ib_umem *umem,
> mana_handle_t *gdma_region)
> {
> struct gdma_dma_region_add_pages_req *add_req = NULL; @@ -
> 329,14 +332,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev
> *dev, struct ib_umem *umem,
> void *request_buf;
> int err;
>
> - mdev = dev->gdma_dev;
> + mdev = mib_dev->gdma_dev;
> gc = mdev->gdma_context;
> hwc = gc->hwc.driver_data;
>
> /* Hardware requires dma region to align to chosen page size */
> page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
> if (!page_sz) {
> - ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
> + ibdev_dbg(&mib_dev->ib_dev, "failed to find page size.\n");
> return -ENOMEM;
> }
> num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
> @@ -362,13 +365,13 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
> create_req->page_count = num_pages_total;
>
> - ibdev_dbg(&dev->ib_dev, "size_dma_region %lu
> num_pages_total %lu\n",
> + ibdev_dbg(&mib_dev->ib_dev, "size_dma_region %lu
> num_pages_total
> +%lu\n",
> umem->length, num_pages_total);
>
> - ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
> + ibdev_dbg(&mib_dev->ib_dev, "page_sz %lu offset_in_page %u\n",
> page_sz, create_req->offset_in_page);
>
> - ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu,
> gdma_page_type %u",
> + ibdev_dbg(&mib_dev->ib_dev, "num_pages_to_handle %lu,
> gdma_page_type
> +%u",
> num_pages_to_handle, create_req->gdma_page_type);
>
> page_addr_list = create_req->page_addr_list; @@ -385,7 +388,7 @@
> int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
>
> if (!num_pages_processed) {
> /* First create message */
> - err = mana_ib_gd_first_dma_region(dev, gc,
> create_req,
> + err = mana_ib_gd_first_dma_region(mib_dev, gc,
> create_req,
> tail, gdma_region,
> expected_status);
> if (err)
> @@ -400,7 +403,7 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> page_addr_list = add_req->page_addr_list;
> } else {
> /* Subsequent create messages */
> - err = mana_ib_gd_add_dma_region(dev, gc, add_req,
> tail,
> + err = mana_ib_gd_add_dma_region(mib_dev, gc,
> add_req, tail,
> expected_status);
> if (err)
> break;
> @@ -417,20 +420,20 @@ int mana_ib_gd_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> }
>
> if (err)
> - mana_ib_gd_destroy_dma_region(dev, *gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, *gdma_region);
>
> out:
> kfree(request_buf);
> return err;
> }
>
> -int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64
> gdma_region)
> +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev, u64
> +gdma_region)
> {
> - struct gdma_dev *mdev = dev->gdma_dev;
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_context *gc;
>
> gc = mdev->gdma_context;
> - ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n",
> gdma_region);
> + ibdev_dbg(&mib_dev->ib_dev, "destroy dma region 0x%llx\n",
> +gdma_region);
>
> return mana_gd_destroy_dma_region(gc, gdma_region); } @@ -
> 440,14 +443,14 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext,
> struct vm_area_struct *vma)
> struct mana_ib_ucontext *mana_ucontext =
> container_of(ibcontext, struct mana_ib_ucontext,
> ibucontext);
> struct ib_device *ibdev = ibcontext->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
> struct gdma_context *gc;
> phys_addr_t pfn;
> pgprot_t prot;
> int ret;
>
> - mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> - gc = mdev->gdma_dev->gdma_context;
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + gc = mib_dev->gdma_dev->gdma_context;
>
> if (vma->vm_pgoff != 0) {
> ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma-
> >vm_pgoff); diff --git a/drivers/infiniband/hw/mana/mana_ib.h
> b/drivers/infiniband/hw/mana/mana_ib.h
> index 502cc8672eef..ee4efd0af278 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -92,10 +92,11 @@ struct mana_ib_rwq_ind_table {
> struct ib_rwq_ind_table ib_ind_table;
> };
>
> -int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct
> ib_umem *umem,
> +int mana_ib_gd_create_dma_region(struct mana_ib_dev *mib_dev,
> + struct ib_umem *umem,
> mana_handle_t *gdma_region);
>
> -int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
> +int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *mib_dev,
> mana_handle_t gdma_region);
>
> struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, @@ -129,9 +130,9 @@
> int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
>
> int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
>
> -int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
> +int mana_ib_cfg_vport(struct mana_ib_dev *mib_dev, u32 port_id,
> struct mana_ib_pd *pd, u32 doorbell_id); -void
> mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
> +void mana_ib_uncfg_vport(struct mana_ib_dev *mib_dev, struct
> mana_ib_pd
> +*pd,
> u32 port);
>
> int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
> diff --git a/drivers/infiniband/hw/mana/mr.c
> b/drivers/infiniband/hw/mana/mr.c index 351207c60eb6..f6a53906204d
> 100644
> --- a/drivers/infiniband/hw/mana/mr.c
> +++ b/drivers/infiniband/hw/mana/mr.c
> @@ -25,12 +25,13 @@ mana_ib_verbs_to_gdma_access_flags(int
> access_flags)
> return flags;
> }
>
> -static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct
> mana_ib_mr *mr,
> +static int mana_ib_gd_create_mr(struct mana_ib_dev *mib_dev,
> + struct mana_ib_mr *mr,
> struct gdma_create_mr_params *mr_params)
> {
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_create_mr_response resp = {};
> struct gdma_create_mr_request req = {};
> - struct gdma_dev *mdev = dev->gdma_dev;
> struct gdma_context *gc;
> int err;
>
> @@ -49,7 +50,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> break;
>
> default:
> - ibdev_dbg(&dev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "invalid param (GDMA_MR_TYPE) passed,
> type %d\n",
> req.mr_type);
> return -EINVAL;
> @@ -58,7 +59,7 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
> &resp);
>
> if (err || resp.hdr.status) {
> - ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to create mr %d, %u",
> err,
> resp.hdr.status);
> if (!err)
> err = -EPROTO;
> @@ -73,11 +74,11 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev
> *dev, struct mana_ib_mr *mr,
> return 0;
> }
>
> -static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
> +static int mana_ib_gd_destroy_mr(struct mana_ib_dev *mib_dev, u64
> +mr_handle)
> {
> struct gdma_destroy_mr_response resp = {};
> + struct gdma_dev *mdev = mib_dev->gdma_dev;
> struct gdma_destroy_mr_request req = {};
> - struct gdma_dev *mdev = dev->gdma_dev;
> struct gdma_context *gc;
> int err;
>
> @@ -107,12 +108,12 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd,
> ibpd);
> struct gdma_create_mr_params mr_params = {};
> struct ib_device *ibdev = ibpd->device;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> struct mana_ib_mr *mr;
> u64 dma_region_handle;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> ibdev_dbg(ibdev,
> "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
> @@ -133,7 +134,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> goto err_free;
> }
>
> - err = mana_ib_gd_create_dma_region(dev, mr->umem,
> &dma_region_handle);
> + err = mana_ib_gd_create_dma_region(mib_dev, mr->umem,
> +&dma_region_handle);
> if (err) {
> ibdev_dbg(ibdev, "Failed create dma region for user-
> mr, %d\n",
> err);
> @@ -151,7 +152,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> mr_params.gva.access_flags =
> mana_ib_verbs_to_gdma_access_flags(access_flags);
>
> - err = mana_ib_gd_create_mr(dev, mr, &mr_params);
> + err = mana_ib_gd_create_mr(mib_dev, mr, &mr_params);
> if (err)
> goto err_dma_region;
>
> @@ -164,7 +165,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd
> *ibpd, u64 start, u64 length,
> return &mr->ibmr;
>
> err_dma_region:
> - mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
> + mana_gd_destroy_dma_region(mib_dev->gdma_dev-
> >gdma_context,
> dma_region_handle);
>
> err_umem:
> @@ -179,12 +180,12 @@ int mana_ib_dereg_mr(struct ib_mr *ibmr, struct
> ib_udata *udata) {
> struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr,
> ibmr);
> struct ib_device *ibdev = ibmr->device;
> - struct mana_ib_dev *dev;
> + struct mana_ib_dev *mib_dev;
> int err;
>
> - dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
>
> - err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
> + err = mana_ib_gd_destroy_mr(mib_dev, mr->mr_handle);
> if (err)
> return err;
>
> diff --git a/drivers/infiniband/hw/mana/qp.c
> b/drivers/infiniband/hw/mana/qp.c index 4b3b5b274e84..2e3a57123ed7
> 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -5,7 +5,7 @@
>
> #include "mana_ib.h"
>
> -static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
> +static int mana_ib_cfg_vport_steering(struct mana_ib_dev *mib_dev,
> struct net_device *ndev,
> mana_handle_t default_rxobj,
> mana_handle_t ind_table[],
> @@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> u32 req_buf_size;
> int i, err;
>
> - mdev = dev->gdma_dev;
> + mdev = mib_dev->gdma_dev;
> gc = mdev->gdma_context;
>
> req_buf_size =
> @@ -55,10 +55,10 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
> * ind_table to MANA_INDIRECT_TABLE_SIZE if required
> */
> - ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 <<
> log_ind_tbl_size);
> + ibdev_dbg(&mib_dev->ib_dev, "ind table size %u\n", 1 <<
> +log_ind_tbl_size);
> for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
> req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
> - ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
> + ibdev_dbg(&mib_dev->ib_dev, "index %u handle 0x%llx\n", i,
> req_indir_tab[i]);
> }
>
> @@ -68,7 +68,7 @@ static int mana_ib_cfg_vport_steering(struct
> mana_ib_dev *dev,
> else
> netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);
>
> - ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
> + ibdev_dbg(&mib_dev->ib_dev, "vport handle %llu default_rxobj
> +0x%llx\n",
> req->vport, default_rxobj);
>
> err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp),
> &resp); @@ -97,12 +97,12 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
> struct ib_udata *udata)
> {
> struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp,
> ibqp);
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(pd->device, struct mana_ib_dev, ib_dev);
> struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
> struct mana_ib_create_qp_rss_resp resp = {};
> struct mana_ib_create_qp_rss ucmd = {};
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;

This needs to follow the reverse Christmas tree ("reverse tree") declaration style used in the rest of the driver.
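I.e. keep the local variable declarations ordered from longest line to shortest where possible, roughly:

	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct mana_port_context *mpc;
	mana_handle_t *mana_ind_table;
	struct mana_context *mc;

(declarations whose initializers depend on earlier locals, like gd depending on mib_dev, have to stay after the ones they use).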

> mana_handle_t *mana_ind_table;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> @@ -123,21 +123,21 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
>
> ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (ret) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed copy from udata for create rss-qp, err %d\n",
> ret);
> return ret;
> }
>
> if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_recv_wr %d exceeding limit\n",
> attr->cap.max_recv_wr);
> return -EINVAL;
> }
>
> if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_recv_sge %d exceeding limit\n",
> attr->cap.max_recv_sge);
> return -EINVAL;
> @@ -145,14 +145,14 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
>
> ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
> if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Indirect table size %d exceeding limit\n",
> ind_tbl_size);
> return -EINVAL;
> }
>
> if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "RX Hash function is not supported, %d\n",
> ucmd.rx_hash_function);
> return -EINVAL;
> @@ -161,14 +161,14 @@ static int mana_ib_create_qp_rss(struct ib_qp
> *ibqp, struct ib_pd *pd,
> /* IB ports start with 1, MANA start with 0 */
> port = ucmd.port;
> if (port < 1 || port > mc->num_ports) {
> - ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating
> qp\n",
> + ibdev_dbg(&mib_dev->ib_dev, "Invalid port %u in creating
> qp\n",
> port);
> return -EINVAL;
> }
> ndev = mc->ports[port - 1];
> mpc = netdev_priv(ndev);
>
> - ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
> + ibdev_dbg(&mib_dev->ib_dev, "rx_hash_function %d port %d\n",
> ucmd.rx_hash_function, port);
>
> mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t), @@ -
> 210,7 +210,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct
> ib_pd *pd,
> wq->id = wq_spec.queue_index;
> cq->id = cq_spec.queue_index;
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
> ret, wq->rx_object, wq->id, cq->id);
>
> @@ -221,7 +221,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp,
> struct ib_pd *pd,
> }
> resp.num_entries = i;
>
> - ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
> + ret = mana_ib_cfg_vport_steering(mib_dev, ndev, wq->rx_object,
> mana_ind_table,
> ind_tbl->log_ind_tbl_size,
> ucmd.rx_hash_key_len,
> @@ -231,7 +231,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp,
> struct ib_pd *pd,
>
> ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
> if (ret) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy to udata create rss-qp, %d\n",
> ret);
> goto fail;
> @@ -259,7 +259,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd, {
> struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd,
> ibpd);
> struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp,
> ibqp);
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(ibpd->device, struct mana_ib_dev, ib_dev);
> struct mana_ib_cq *send_cq =
> container_of(attr->send_cq, struct mana_ib_cq, ibcq); @@ -
> 267,7 +267,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
> rdma_udata_to_drv_context(udata, struct
> mana_ib_ucontext,
> ibucontext);
> struct mana_ib_create_qp_resp resp = {};
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_ib_create_qp ucmd = {};
> struct mana_obj_spec wq_spec = {};
> struct mana_obj_spec cq_spec = {};
> @@ -285,7 +285,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
>
> err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy from udata create qp-raw, %d\n",
> err);
> return err;
> }
> @@ -296,14 +296,14 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
> return -EINVAL;
>
> if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_send_wr %d exceeding limit\n",
> attr->cap.max_send_wr);
> return -EINVAL;
> }
>
> if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Requested max_send_sge %d exceeding limit\n",
> attr->cap.max_send_sge);
> return -EINVAL;
> @@ -311,38 +311,38 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
>
> ndev = mc->ports[port - 1];
> mpc = netdev_priv(ndev);
> - ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port,
> ndev, mpc);
> + ibdev_dbg(&mib_dev->ib_dev, "port %u ndev %p mpc %p\n", port,
> ndev,
> +mpc);
>
> - err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext-
> >doorbell);
> + err = mana_ib_cfg_vport(mib_dev, port - 1, pd,
> +mana_ucontext->doorbell);
> if (err)
> return -ENODEV;
>
> qp->port = port;
>
> - ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
> + ibdev_dbg(&mib_dev->ib_dev, "ucmd sq_buf_addr 0x%llx
> port %u\n",
> ucmd.sq_buf_addr, ucmd.port);
>
> umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr,
> ucmd.sq_buf_size,
> IB_ACCESS_LOCAL_WRITE);
> if (IS_ERR(umem)) {
> err = PTR_ERR(umem);
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get umem for create qp-raw, err %d\n",
> err);
> goto err_free_vport;
> }
> qp->sq_umem = umem;
>
> - err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
> + err = mana_ib_gd_create_dma_region(mib_dev, qp->sq_umem,
> &qp->sq_gdma_region);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create dma region for create qp-
> raw, %d\n",
> err);
> goto err_release_umem;
> }
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "mana_ib_gd_create_dma_region ret %d gdma_region
> 0x%llx\n",
> err, qp->sq_gdma_region);
>
> @@ -358,7 +358,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
> err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ,
> &wq_spec,
> &cq_spec, &qp->tx_object);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create wq for create raw-qp, err %d\n",
> err);
> goto err_destroy_dma_region;
> @@ -371,7 +371,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
> qp->sq_id = wq_spec.queue_index;
> send_cq->id = cq_spec.queue_index;
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
> qp->tx_object, qp->sq_id, send_cq->id);
>
> @@ -381,7 +381,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp,
> struct ib_pd *ibpd,
>
> err = ib_copy_to_udata(udata, &resp, sizeof(resp));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed copy udata for create qp-raw, %d\n",
> err);
> goto err_destroy_wq_obj;
> @@ -393,13 +393,13 @@ static int mana_ib_create_qp_raw(struct ib_qp
> *ibqp, struct ib_pd *ibpd,
> mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
>
> err_destroy_dma_region:
> - mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, qp->sq_gdma_region);
>
> err_release_umem:
> ib_umem_release(umem);
>
> err_free_vport:
> - mana_ib_uncfg_vport(mdev, pd, port - 1);
> + mana_ib_uncfg_vport(mib_dev, pd, port - 1);
>
> return err;
> }
> @@ -435,9 +435,9 @@ static int mana_ib_destroy_qp_rss(struct
> mana_ib_qp *qp,
> struct ib_rwq_ind_table *ind_tbl,
> struct ib_udata *udata)
> {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> struct net_device *ndev;
> @@ -452,7 +452,7 @@ static int mana_ib_destroy_qp_rss(struct
> mana_ib_qp *qp,
> for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
> ibwq = ind_tbl->ind_tbl[i];
> wq = container_of(ibwq, struct mana_ib_wq, ibwq);
> - ibdev_dbg(&mdev->ib_dev, "destroying wq-
> >rx_object %llu\n",
> + ibdev_dbg(&mib_dev->ib_dev, "destroying wq-
> >rx_object %llu\n",
> wq->rx_object);
> mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
> }
> @@ -462,9 +462,9 @@ static int mana_ib_destroy_qp_rss(struct
> mana_ib_qp *qp,
>
> static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata
> *udata) {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
> - struct gdma_dev *gd = mdev->gdma_dev;
> + struct gdma_dev *gd = mib_dev->gdma_dev;
> struct ib_pd *ibpd = qp->ibqp.pd;
> struct mana_port_context *mpc;
> struct mana_context *mc;
> @@ -479,11 +479,11 @@ static int mana_ib_destroy_qp_raw(struct
> mana_ib_qp *qp, struct ib_udata *udata)
> mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
>
> if (qp->sq_umem) {
> - mana_ib_gd_destroy_dma_region(mdev, qp-
> >sq_gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, qp-
> >sq_gdma_region);
> ib_umem_release(qp->sq_umem);
> }
>
> - mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
> + mana_ib_uncfg_vport(mib_dev, pd, qp->port - 1);
>
> return 0;
> }
> diff --git a/drivers/infiniband/hw/mana/wq.c
> b/drivers/infiniband/hw/mana/wq.c index 372d361510e0..56bc2b8b6690
> 100644
> --- a/drivers/infiniband/hw/mana/wq.c
> +++ b/drivers/infiniband/hw/mana/wq.c
> @@ -9,7 +9,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> struct ib_wq_init_attr *init_attr,
> struct ib_udata *udata)
> {
> - struct mana_ib_dev *mdev =
> + struct mana_ib_dev *mib_dev =
> container_of(pd->device, struct mana_ib_dev, ib_dev);
> struct mana_ib_create_wq ucmd = {};
> struct mana_ib_wq *wq;
> @@ -21,7 +21,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>
> err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> >inlen));
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to copy from udata for create wq, %d\n", err);
> return ERR_PTR(err);
> }
> @@ -30,13 +30,14 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> if (!wq)
> return ERR_PTR(-ENOMEM);
>
> - ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n",
> ucmd.wq_buf_addr);
> + ibdev_dbg(&mib_dev->ib_dev, "ucmd wq_buf_addr 0x%llx\n",
> + ucmd.wq_buf_addr);
>
> umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
> IB_ACCESS_LOCAL_WRITE);
> if (IS_ERR(umem)) {
> err = PTR_ERR(umem);
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to get umem for create wq, err %d\n", err);
> goto err_free_wq;
> }
> @@ -46,15 +47,15 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> wq->wq_buf_size = ucmd.wq_buf_size;
> wq->rx_object = INVALID_MANA_HANDLE;
>
> - err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
> + err = mana_ib_gd_create_dma_region(mib_dev, wq->umem, &wq->gdma_region);
> if (err) {
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "Failed to create dma region for create wq, %d\n",
> err);
> goto err_release_umem;
> }
>
> - ibdev_dbg(&mdev->ib_dev,
> + ibdev_dbg(&mib_dev->ib_dev,
> "mana_ib_gd_create_dma_region ret %d gdma_region
> 0x%llx\n",
> err, wq->gdma_region);
>
> @@ -82,11 +83,11 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
> {
> struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq);
> struct ib_device *ib_dev = ibwq->device;
> - struct mana_ib_dev *mdev;
> + struct mana_ib_dev *mib_dev;
>
> - mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
> + mib_dev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
>
> - mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
> + mana_ib_gd_destroy_dma_region(mib_dev, wq->gdma_region);
> ib_umem_release(wq->umem);
>
> kfree(wq);
> --
> 2.25.1


2023-07-28 23:26:08

by Long Li

[permalink] [raw]
Subject: RE: [Patch v3 4/4] RDMA/mana_ib : Query adapter capabilities

> Subject: [Patch v3 4/4] RDMA/mana_ib : Query adapter capabilities
>
> From: Ajay Sharma <[email protected]>
>
> Query the adapter capabilities and expose them to other clients and the VF.
> User-supplied values are checked against these limits to protect against
> resource overflows.
>
> Signed-off-by: Ajay Sharma <[email protected]>
> ---
> drivers/infiniband/hw/mana/device.c | 4 ++
> drivers/infiniband/hw/mana/main.c | 66 +++++++++++++++++++++++++---
> drivers/infiniband/hw/mana/mana_ib.h | 53 +++++++++++++++++++++-
> 3 files changed, 115 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
> index 4077e440657a..e15da43c73a0 100644
> --- a/drivers/infiniband/hw/mana/device.c
> +++ b/drivers/infiniband/hw/mana/device.c
> @@ -97,6 +97,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
> goto free_error_eq;
> }
>
> + ret = mana_ib_query_adapter_caps(mib_dev);
> + if (ret)
> + ibdev_dbg(&mib_dev->ib_dev, "Failed to get caps, use
> defaults");

There is already an ibdev_err() in mana_ib_query_adapter_caps(); how about merging this message into that one?

You can also drop the return value of mana_ib_query_adapter_caps(), since the caller doesn't do anything meaningful with it.
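
Untested, but I'm thinking of something along these lines (based only on the
hunks quoted below: the "use defaults" note moves into the error message, the
function becomes void, and probe simply calls it):

void mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
{
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3;
	req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;

	err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
				   sizeof(resp), &resp);
	if (err) {
		/* Keep the compile-time defaults set in mana_ib_query_device() */
		ibdev_err(&mib_dev->ib_dev,
			  "Failed to query adapter caps err %d, using defaults",
			  err);
		return;
	}

	assign_caps(&mib_dev->adapter_caps, &resp);
}

and in mana_ib_probe() the call becomes just:

	mana_ib_query_adapter_caps(mib_dev);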

> +
> ret = ib_register_device(&mib_dev->ib_dev, "mana_%d",
> mdev->gdma_context->dev);
> if (ret)
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 1b1a8670d0fa..512815e1e64d 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -469,21 +469,27 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
> int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
> struct ib_udata *uhw)
> {
> + struct mana_ib_dev *mib_dev = container_of(ibdev,
> + struct mana_ib_dev, ib_dev);
> +
> props->max_qp = MANA_MAX_NUM_QUEUES;
> props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> - /*
> - * max_cqe could be potentially much bigger.
> - * As this version of driver only support RAW QP, set it to the same
> - * value as max_qp_wr
> - */
> props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
> -
> props->max_mr_size = MANA_IB_MAX_MR_SIZE;
> props->max_mr = MANA_IB_MAX_MR;
> props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
> props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
>
> + /* If the Management SW is updated and supports adapter creation */
> + if (mib_dev->adapter_handle) {

Does this mean that mana_ib_query_adapter_caps() succeeded?
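
If the intent is "the caps query succeeded", an explicit flag might read
clearer than keying off adapter_handle. Rough idea only, and
adapter_caps_valid is a name I just made up:

	/* hypothetical flag, set by mana_ib_query_adapter_caps() on success */
	if (mib_dev->adapter_caps_valid) {
		props->max_qp = mib_dev->adapter_caps.max_qp_count;
		props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
		/* ... remaining caps as in this hunk ... */
	}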


> + props->max_qp = mib_dev->adapter_caps.max_qp_count;
> + props->max_qp_wr = mib_dev->adapter_caps.max_requester_sq_size;
> + props->max_cqe = mib_dev->adapter_caps.max_requester_sq_size;
> + props->max_mr = mib_dev->adapter_caps.max_mr_count;
> + props->max_send_sge = mib_dev->adapter_caps.max_send_wqe_size;
> + props->max_recv_sge = mib_dev->adapter_caps.max_recv_wqe_size;
> + }
> +
> return 0;
> }
>
> @@ -599,3 +605,49 @@ int mana_ib_create_error_eq(struct mana_ib_dev *mib_dev)
>
> return 0;
> }
> +
> +static void assign_caps(struct mana_ib_adapter_caps *caps,
> + struct mana_ib_query_adapter_caps_resp *resp)
> +{
> + caps->max_sq_id = resp->max_sq_id;
> + caps->max_rq_id = resp->max_rq_id;
> + caps->max_cq_id = resp->max_cq_id;
> + caps->max_qp_count = resp->max_qp_count;
> + caps->max_cq_count = resp->max_cq_count;
> + caps->max_mr_count = resp->max_mr_count;
> + caps->max_pd_count = resp->max_pd_count;
> + caps->max_inbound_read_limit = resp->max_inbound_read_limit;
> + caps->max_outbound_read_limit = resp->max_outbound_read_limit;
> + caps->mw_count = resp->mw_count;
> + caps->max_srq_count = resp->max_srq_count;
> + caps->max_requester_sq_size = resp->max_requester_sq_size;
> + caps->max_responder_sq_size = resp->max_responder_sq_size;
> + caps->max_requester_rq_size = resp->max_requester_rq_size;
> + caps->max_responder_rq_size = resp->max_responder_rq_size;
> + caps->max_send_wqe_size = resp->max_send_wqe_size;
> + caps->max_recv_wqe_size = resp->max_recv_wqe_size;
> + caps->max_inline_data_size = resp->max_inline_data_size;
> +}
> +
> +int mana_ib_query_adapter_caps(struct mana_ib_dev *mib_dev)
> +{
> + struct mana_ib_query_adapter_caps_resp resp = {};
> + struct mana_ib_query_adapter_caps_req req = {};
> + int err;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
> + sizeof(resp));
> + req.hdr.resp.msg_version = MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3;
> + req.hdr.dev_id = mib_dev->gc->mana_ib.dev_id;
> +
> + err = mana_gd_send_request(mib_dev->gc, sizeof(req), &req,
> + sizeof(resp), &resp);
> +
> + if (err) {
> + ibdev_err(&mib_dev->ib_dev, "Failed to query adapter caps err %d", err);
> + return err;
> + }
> +
> + assign_caps(&mib_dev->adapter_caps, &resp);
> + return 0;
> +}
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index 8a652bccd978..1044358230d3 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -20,19 +20,41 @@
>
> /* MANA doesn't have any limit for MR size */
> #define MANA_IB_MAX_MR_SIZE U64_MAX
> -
> +#define MANA_IB__GET_ADAPTER_CAP_RESPONSE_V3 3

This value is used in GDMA header of the request message? If so, define GDMA_MESSAGE_V3 in "include/net/mana/gdma.h".