2006-01-07 00:25:47

by Roland Dreier

Subject: [git patch review 1/8] IB/mthca: max_inline_data handling tweaks

Fix a case where copying max_inline_data from a successful create_qp
capabilities output back into a create_qp input could cause an EINVAL
error:

mthca_set_qp_size must check max_inline_data directly against
max_desc_sz; checking it against qp->sq.max_gs is wrong, since
max_inline_data depends on the QP type and does not involve max_sg.
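
To make the invariant concrete, here is a minimal standalone sketch of
the new check (simplified: the real driver also subtracts
transport-specific overhead in mthca_max_data_size, and
INLINE_HEADER_SIZE / struct next_seg below are stand-ins for
MTHCA_INLINE_HEADER_SIZE and struct mthca_next_seg):

    #include <errno.h>

    #define INLINE_HEADER_SIZE 4                    /* stand-in value */

    struct next_seg { unsigned int f[4]; };         /* stand-in for mthca_next_seg */

    /* The inline-data limit is derived from the maximum descriptor size,
     * independent of max_sg; kernel QPs get no inline data. */
    static int check_inline(int max_desc_sz, int is_user_qp, int max_inline_data)
    {
            int max_data_size = max_desc_sz - (int) sizeof(struct next_seg);
            int max_inline    = is_user_qp ? max_data_size - INLINE_HEADER_SIZE : 0;

            return max_inline_data > max_inline ? -EINVAL : 0;
    }

Because create_qp reports its capability from the same max_desc_sz-based
formula, a value copied from a successful create_qp output now always
passes this check on the next create_qp call.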

Signed-off-by: Jack Morgenstein <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_qp.c | 62 +++++++++++++++++++-------------
1 files changed, 36 insertions(+), 26 deletions(-)

5b3bc7a68171138d52b1b62012c37ac888895460
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ea45fa4..fd60cf3 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -890,18 +890,13 @@ int mthca_modify_qp(struct ib_qp *ibqp,
return err;
}

-static void mthca_adjust_qp_caps(struct mthca_dev *dev,
- struct mthca_pd *pd,
- struct mthca_qp *qp)
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
- int max_data_size;
-
/*
* Calculate the maximum size of WQE s/g segments, excluding
* the next segment and other non-data segments.
*/
- max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
- sizeof (struct mthca_next_seg);
+ int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

switch (qp->transport) {
case MLX:
@@ -920,11 +915,24 @@ static void mthca_adjust_qp_caps(struct
break;
}

+ return max_data_size;
+}
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
/* We don't support inline data for kernel QPs (yet). */
- if (!pd->ibpd.uobject)
- qp->max_inline_data = 0;
- else
- qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+ return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_qp *qp)
+{
+ int max_data_size = mthca_max_data_size(dev, qp,
+ min(dev->limits.max_desc_sz,
+ 1 << qp->sq.wqe_shift));
+
+ qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

qp->sq.max_gs = min_t(int, dev->limits.max_sg,
max_data_size / sizeof (struct mthca_data_seg));
@@ -1191,13 +1199,23 @@ static int mthca_alloc_qp_common(struct
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
- struct mthca_qp *qp)
+ struct mthca_pd *pd, struct mthca_qp *qp)
{
+ int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
/* Sanity check QP size before proceeding */
- if (cap->max_send_wr > dev->limits.max_wqes ||
- cap->max_recv_wr > dev->limits.max_wqes ||
- cap->max_send_sge > dev->limits.max_sg ||
- cap->max_recv_sge > dev->limits.max_sg)
+ if (cap->max_send_wr > dev->limits.max_wqes ||
+ cap->max_recv_wr > dev->limits.max_wqes ||
+ cap->max_send_sge > dev->limits.max_sg ||
+ cap->max_recv_sge > dev->limits.max_sg ||
+ cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
+ return -EINVAL;
+
+ /*
+ * For MLX transport we need 2 extra S/G entries:
+ * one for the header and one for the checksum at the end
+ */
+ if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
return -EINVAL;

if (mthca_is_memfree(dev)) {
@@ -1216,14 +1234,6 @@ static int mthca_set_qp_size(struct mthc
MTHCA_INLINE_CHUNK_SIZE) /
sizeof (struct mthca_data_seg));

- /*
- * For MLX transport we need 2 extra S/G entries:
- * one for the header and one for the checksum at the end
- */
- if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
- qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
- return -EINVAL;
-
return 0;
}

@@ -1238,7 +1248,7 @@ int mthca_alloc_qp(struct mthca_dev *dev
{
int err;

- err = mthca_set_qp_size(dev, cap, qp);
+ err = mthca_set_qp_size(dev, cap, pd, qp);
if (err)
return err;

@@ -1281,7 +1291,7 @@ int mthca_alloc_sqp(struct mthca_dev *de
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;

- err = mthca_set_qp_size(dev, cap, &sqp->qp);
+ err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
if (err)
return err;

--
0.99.9n


2006-01-07 00:25:50

by Roland Dreier

Subject: [git patch review 3/8] IB/mthca: fix for RTR-to-RTS transition in modify QP

PKEY_INDEX is not a legal parameter in the RTR->RTS transition.
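
For context, mthca_modify_qp validates attr_mask against a per-transition
table of required and optional parameters, so dropping IB_QP_PKEY_INDEX
from these opt_param entries makes the driver reject it on RTR->RTS. A
simplified sketch of that checking pattern (the real table is also
indexed by transport type):

    #include <errno.h>

    /* Reject a transition unless all required attributes are present
     * and no attribute outside required|optional is set. */
    static int check_transition(int attr_mask, int req_param, int opt_param)
    {
            if ((attr_mask & req_param) != req_param)
                    return -EINVAL;         /* required attribute missing */
            if (attr_mask & ~(req_param | opt_param))
                    return -EINVAL;         /* attribute not legal here */
            return 0;
    }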

Signed-off-by: Jack Morgenstein <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_qp.c | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)

0d3b525fff40475e58dab9176740d2efc5f37838
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 623f514..ff2def3 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -383,12 +383,10 @@ static const struct {
[UC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
IB_QP_MIN_RNR_TIMER |
IB_QP_PATH_MIG_STATE),
[MLX] = (IB_QP_CUR_STATE |
--
0.99.9n

2006-01-07 00:26:16

by Roland Dreier

Subject: [git patch review 2/8] IB/mthca: fix for SQEr-to-RTS transition in modify QP

Fixes to the SQEr->RTS transition in modify_qp:
1. The IB_QP_ACCESS_FLAGS flag is optional for UC QPs.
2. The SQEr state is not supported for RC QPs.
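
As a usage note: SQEr (the IB spec's SQE, send queue error) is the state
a UD or UC QP enters when a send completes in error, and the consumer
recovers it with an SQEr->RTS transition; RC QPs instead go straight to
the ERR state on such errors, which is why the RC entry is removed. A
hedged libibverbs-style sketch of the recovery (qp is assumed to be a UC
QP already in SQEr):

    struct ibv_qp_attr attr = {
            .qp_state     = IBV_QPS_RTS,
            .cur_qp_state = IBV_QPS_SQE,
            /* access flags may now optionally be updated in the same call */
    };

    if (ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_CUR_STATE))
            /* recovery failed; reset the QP instead */;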

Signed-off-by: Jack Morgenstein <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_qp.c | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

0364ffc3e8c441d4185e3eb41ecc61dbb09614e4
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fd60cf3..623f514 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -476,9 +476,8 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
- [UC] = IB_QP_CUR_STATE,
- [RC] = (IB_QP_CUR_STATE |
- IB_QP_MIN_RNR_TIMER),
+ [UC] = (IB_QP_CUR_STATE |
+ IB_QP_ACCESS_FLAGS),
[MLX] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
}
--
0.99.9n

2006-01-07 00:26:16

by Roland Dreier

Subject: [git patch review 8/8] IB/uverbs: set ah_flags when creating address handle

The AH attribute's ah_flags field needs to be set according to the
is_global flag passed in from userspace.
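
For illustration, the userspace path that exercises this is AH creation
through libibverbs with is_global set, which is the only way a user
requests a GRH; before the fix the grh fields were copied but ah_flags
stayed zero, so the kernel built a LID-routed AH regardless. A hedged
sketch (dlid and remote_gid are placeholders):

    struct ibv_ah_attr ah_attr = {
            .is_global = 1,         /* must now become IB_AH_GRH in the kernel */
            .dlid      = dlid,
            .port_num  = 1,
            .grh       = {
                    .dgid       = remote_gid,
                    .sgid_index = 0,
                    .hop_limit  = 64,
            },
    };
    struct ibv_ah *ah = ibv_create_ah(pd, &ah_attr);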

Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/core/uverbs_cmd.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)

ea5d4a6ad2bfd1006790666981645cab43d3afbd
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6985a57..12d6cc0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1454,6 +1454,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uv
attr.sl = cmd.attr.sl;
attr.src_path_bits = cmd.attr.src_path_bits;
attr.static_rate = cmd.attr.static_rate;
+ attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
attr.port_num = cmd.attr.port_num;
attr.grh.flow_label = cmd.attr.grh.flow_label;
attr.grh.sgid_index = cmd.attr.grh.sgid_index;
--
0.99.9n

2006-01-07 00:27:05

by Roland Dreier

Subject: [git patch review 6/8] IB/mthca: Add support for automatic path migration (APM)

Add code to the modify QP operation to handle setting alternate paths
for connected QPs.
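
For context, a consumer loads the alternate path and arms migration
roughly as follows; a hedged kernel-API sketch (attribute values are
placeholders, and alt_ah is a pre-filled struct ib_ah_attr):

    struct ib_qp_attr attr = {
            .alt_ah_attr    = alt_ah,       /* the failover path */
            .alt_pkey_index = 0,
            .alt_port_num   = 2,
            .alt_timeout    = 14,
            .path_mig_state = IB_MIG_REARM,
    };

    err = ib_modify_qp(qp, &attr, IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE);

Once armed, the HCA fails over to the alternate path on its own when the
primary path errors and reports IB_MIG_MIGRATED; the old
"/* XXX alt_path */" placeholder below is what this patch replaces.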

Signed-off-by: Dotan Barak <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_qp.c | 57 +++++++++++++++++++++-----------
1 files changed, 37 insertions(+), 20 deletions(-)

4de144bf721e46e7ccc8fed45b20a640cc364904
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ff2def3..564b6d5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -549,6 +549,25 @@ static __be32 get_hw_access_flags(struct
return cpu_to_be32(hw_access_flags);
}

+static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+{
+ path->g_mylmc = ah->src_path_bits & 0x7f;
+ path->rlid = cpu_to_be16(ah->dlid);
+ path->static_rate = !!ah->static_rate;
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ path->g_mylmc |= 1 << 7;
+ path->mgid_index = ah->grh.sgid_index;
+ path->hop_limit = ah->grh.hop_limit;
+ path->sl_tclass_flowlabel =
+ cpu_to_be32((ah->sl << 28) |
+ (ah->grh.traffic_class << 20) |
+ (ah->grh.flow_label));
+ memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ } else
+ path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+}
+
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
@@ -712,28 +731,14 @@ int mthca_modify_qp(struct ib_qp *ibqp,
}

if (attr_mask & IB_QP_RNR_RETRY) {
- qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
+ qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
+ attr->rnr_retry << 5;
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
+ MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
}

if (attr_mask & IB_QP_AV) {
- qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
- qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
- qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
- if (attr->ah_attr.ah_flags & IB_AH_GRH) {
- qp_context->pri_path.g_mylmc |= 1 << 7;
- qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
- qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
- qp_context->pri_path.sl_tclass_flowlabel =
- cpu_to_be32((attr->ah_attr.sl << 28) |
- (attr->ah_attr.grh.traffic_class << 20) |
- (attr->ah_attr.grh.flow_label));
- memcpy(qp_context->pri_path.rgid,
- attr->ah_attr.grh.dgid.raw, 16);
- } else {
- qp_context->pri_path.sl_tclass_flowlabel =
- cpu_to_be32(attr->ah_attr.sl << 28);
- }
+ mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
}

@@ -742,7 +747,19 @@ int mthca_modify_qp(struct ib_qp *ibqp,
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
}

- /* XXX alt_path */
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
+ mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
+ attr->alt_port_num);
+ return -EINVAL;
+ }
+
+ mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+ qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
+ attr->alt_port_num << 24);
+ qp_context->alt_path.ackto = attr->alt_timeout << 3;
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
+ }

/* leave rdd as 0 */
qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
--
0.99.9n

2006-01-07 00:27:04

by Roland Dreier

Subject: [git patch review 5/8] IB/mthca: Fill in vendor_err field in completion with error

Fill in the vendor_err field when a completion is in error.
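
The point of the field is debuggability: a consumer polling the CQ can
now log the vendor-specific syndrome next to the generic status. A
hedged userspace-style sketch (cq is assumed in scope):

    struct ibv_wc wc;

    while (ibv_poll_cq(cq, 1, &wc) > 0)
            if (wc.status != IBV_WC_SUCCESS)
                    fprintf(stderr, "wr_id %llu: status %d, vendor_err 0x%x\n",
                            (unsigned long long) wc.wr_id, wc.status,
                            wc.vendor_err);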

Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_cq.c | 12 +++++++-----
1 files changed, 7 insertions(+), 5 deletions(-)

0f8e8f9607d77ffc1f9820446dfcf781e96fdfd4
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index fcef8dc..96f1a86 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -128,12 +128,12 @@ struct mthca_err_cqe {
__be32 my_qpn;
u32 reserved1[3];
u8 syndrome;
- u8 reserved2;
+ u8 vendor_err;
__be16 db_cnt;
- u32 reserved3;
+ u32 reserved2;
__be32 wqe;
u8 opcode;
- u8 reserved4[2];
+ u8 reserved3[2];
u8 owner;
};

@@ -342,8 +342,8 @@ static int handle_error_cqe(struct mthca
}

/*
- * For completions in error, only work request ID, status (and
- * freed resource count for RD) have to be set.
+ * For completions in error, only work request ID, status, vendor error
+ * (and freed resource count for RD) have to be set.
*/
switch (cqe->syndrome) {
case SYNDROME_LOCAL_LENGTH_ERR:
@@ -405,6 +405,8 @@ static int handle_error_cqe(struct mthca
break;
}

+ entry->vendor_err = cqe->vendor_err;
+
/*
* Mem-free HCAs always generate one CQE per WQE, even in the
* error case, so we don't have to check the doorbell count, etc.
--
0.99.9n

2006-01-07 00:27:03

by Roland Dreier

Subject: [git patch review 7/8] IB/uverbs: Fix reference counting on error paths

If an operation fails after incrementing an object's reference count,
then it should decrement the reference count on the error path.
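
The pattern being completed is the usual kernel goto-unwind: every side
effect taken before a failure point gets reversed, in reverse order, on
the error path. A generic hedged sketch (create_object and
publish_object are illustrative names, not the uverbs code):

    atomic_inc(&pd->usecnt);            /* (1) side effect to undo on failure */

    obj = create_object(pd);
    if (IS_ERR(obj)) {
            ret = PTR_ERR(obj);
            goto err_ref;
    }

    ret = publish_object(obj);
    if (ret)
            goto err_destroy;

    return 0;

    err_destroy:
    destroy_object(obj);
    err_ref:
    atomic_dec(&pd->usecnt);            /* undo (1) */
    return ret;

The hunks below add exactly the missing atomic_dec calls on the error
paths of MR, QP, and SRQ creation.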

Signed-off-by: Jack Morgenstein <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/core/uverbs_cmd.c | 7 +++++++
1 files changed, 7 insertions(+), 0 deletions(-)

b4ca1a3f8ca24033d7b7ef595faef97d9f8b2326
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a57d021..6985a57 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -489,6 +489,7 @@ err_idr:

err_unreg:
ib_dereg_mr(mr);
+ atomic_dec(&pd->usecnt);

err_up:
up(&ib_uverbs_idr_mutex);
@@ -935,6 +936,11 @@ err_idr:

err_destroy:
ib_destroy_qp(qp);
+ atomic_dec(&pd->usecnt);
+ atomic_dec(&attr.send_cq->usecnt);
+ atomic_dec(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_dec(&attr.srq->usecnt);

err_up:
up(&ib_uverbs_idr_mutex);
@@ -1729,6 +1735,7 @@ err_idr:

err_destroy:
ib_destroy_srq(srq);
+ atomic_dec(&pd->usecnt);

err_up:
up(&ib_uverbs_idr_mutex);
--
0.99.9n

2006-01-07 00:28:57

by Roland Dreier

Subject: [git patch review 4/8] IB/mthca: multiple fixes for multicast group handling

Multicast group management fixes:
. Fix a leak of mailbox memory in the error handling of multicast group
operations.
. Free AMGM indices at detach and in attach error handling.
. Fix the shift amount used to align next_gid_index in the mailbox: the
field starts at bit 6, not bit 5 (see the sketch after this list).
. Allocate AMGM indices after the end of the MGM table, in the range
num_mgms to multicast table size - 1. Add some BUG_ON checks to catch
cases where an index falls in the MGM hash area.
. Initialize the list of QPs in a newly-allocated group from AMGM to 0.
This is necessary since when a group is moved from AMGM to MGM (in the
case where the MGM entry has been emptied of QPs), the AMGM entry is
not reset to 0 (and we don't want an extra command to do that).
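
On the next_gid_index shift, a minimal sketch of the layout the fix
assumes: the next-index field begins at bit 6 of the word, so reads use
>> 6 and writes use << 6 (the old shift by 5 corrupted every chained
index):

    #include <stdint.h>

    /* next_gid_index field starts at bit 6; values are already
     * byte-swapped with be32_to_cpu / cpu_to_be32 in the driver. */
    static uint32_t next_index_get(uint32_t next_gid_index)
    {
            return next_gid_index >> 6;
    }

    static uint32_t next_index_set(uint32_t index)
    {
            return index << 6;
    }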

Signed-off-by: Jack Morgenstein <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>

---

drivers/infiniband/hw/mthca/mthca_mcg.c | 54 ++++++++++++++++++++-----------
1 files changed, 35 insertions(+), 19 deletions(-)

5ceb74557c71465cf8f6fda050aac00e53f9ad3d
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 2fc449d..77bc6c7 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -111,7 +111,8 @@ static int find_mgm(struct mthca_dev *de
goto out;
if (status) {
mthca_err(dev, "READ_MGM returned status %02x\n", status);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}

if (!memcmp(mgm->gid, zero_gid, 16)) {
@@ -126,7 +127,7 @@ static int find_mgm(struct mthca_dev *de
goto out;

*prev = *index;
- *index = be32_to_cpu(mgm->next_gid_index) >> 5;
+ *index = be32_to_cpu(mgm->next_gid_index) >> 6;
} while (*index);

*index = -1;
@@ -153,8 +154,10 @@ int mthca_multicast_attach(struct ib_qp
return PTR_ERR(mailbox);
mgm = mailbox->buf;

- if (down_interruptible(&dev->mcg_table.sem))
- return -EINTR;
+ if (down_interruptible(&dev->mcg_table.sem)) {
+ err = -EINTR;
+ goto err_sem;
+ }

err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
@@ -181,9 +184,8 @@ int mthca_multicast_attach(struct ib_qp
err = -EINVAL;
goto out;
}
-
+ memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid->raw, 16);
- mgm->next_gid_index = 0;
}

for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
@@ -209,6 +211,7 @@ int mthca_multicast_attach(struct ib_qp
if (status) {
mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
err = -EINVAL;
+ goto out;
}

if (!link)
@@ -223,7 +226,7 @@ int mthca_multicast_attach(struct ib_qp
goto out;
}

- mgm->next_gid_index = cpu_to_be32(index << 5);
+ mgm->next_gid_index = cpu_to_be32(index << 6);

err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
@@ -234,7 +237,12 @@ int mthca_multicast_attach(struct ib_qp
}

out:
+ if (err && link && index != -1) {
+ BUG_ON(index < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, index);
+ }
up(&dev->mcg_table.sem);
+ err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -255,8 +263,10 @@ int mthca_multicast_detach(struct ib_qp
return PTR_ERR(mailbox);
mgm = mailbox->buf;

- if (down_interruptible(&dev->mcg_table.sem))
- return -EINTR;
+ if (down_interruptible(&dev->mcg_table.sem)) {
+ err = -EINTR;
+ goto err_sem;
+ }

err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
@@ -305,13 +315,11 @@ int mthca_multicast_detach(struct ib_qp
if (i != 1)
goto out;

- goto out;
-
if (prev == -1) {
/* Remove entry from MGM */
- if (be32_to_cpu(mgm->next_gid_index) >> 5) {
- err = mthca_READ_MGM(dev,
- be32_to_cpu(mgm->next_gid_index) >> 5,
+ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
+ if (amgm_index_to_free) {
+ err = mthca_READ_MGM(dev, amgm_index_to_free,
mailbox, &status);
if (err)
goto out;
@@ -332,9 +340,13 @@ int mthca_multicast_detach(struct ib_qp
err = -EINVAL;
goto out;
}
+ if (amgm_index_to_free) {
+ BUG_ON(amgm_index_to_free < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, amgm_index_to_free);
+ }
} else {
/* Remove entry from AMGM */
- index = be32_to_cpu(mgm->next_gid_index) >> 5;
+ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
@@ -344,7 +356,7 @@ int mthca_multicast_detach(struct ib_qp
goto out;
}

- mgm->next_gid_index = cpu_to_be32(index << 5);
+ mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);

err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
@@ -354,10 +366,13 @@ int mthca_multicast_detach(struct ib_qp
err = -EINVAL;
goto out;
}
+ BUG_ON(index < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, index);
}

out:
up(&dev->mcg_table.sem);
+ err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -365,11 +380,12 @@ int mthca_multicast_detach(struct ib_qp
int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
{
int err;
+ int table_size = dev->limits.num_mgms + dev->limits.num_amgms;

err = mthca_alloc_init(&dev->mcg_table.alloc,
- dev->limits.num_amgms,
- dev->limits.num_amgms - 1,
- 0);
+ table_size,
+ table_size - 1,
+ dev->limits.num_mgms);
if (err)
return err;

--
0.99.9n