These two patches address ehca issues that should be fixed in 2.6.23.
[1/2] fixes regressions caused by the recent addition of Small QPs.
[2/2] adds missing SRQ-related functionality; without it, IPoIB CM would break.
The patches should apply cleanly, in order, against Roland's git. Please
review the changes and apply the patches for 2.6.23-rc5 if they are okay.
Regards,
Joachim
--
Joachim Fenkes -- eHCA Linux Driver Developer and Hardware Tamer
IBM Deutschland Entwicklung GmbH -- Dept. 3627 (I/O Firmware Dev. 2)
Schoenaicher Strasse 220 -- 71032 Boeblingen -- Germany
eMail: [email protected]
From: Stefan Roscher <[email protected]>
The new Small QP code had a few bugs that would also trigger for non-Small
QPs. Fix them.
Signed-off-by: Joachim Fenkes <[email protected]>
---
drivers/infiniband/hw/ehca/ehca_qp.c | 10 ++++++----
drivers/infiniband/hw/ehca/ipz_pt_fn.c | 2 +-
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b178cba..84d435a 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -600,10 +600,12 @@ static struct ehca_qp *internal_create_qp(
if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
&& !(context && udata)) { /* no small QP support in userspace ATM */
- ehca_determine_small_queue(
- &parms.squeue, max_send_sge, is_llqp);
- ehca_determine_small_queue(
- &parms.rqueue, max_recv_sge, is_llqp);
+ if (HAS_SQ(my_qp))
+ ehca_determine_small_queue(
+ &parms.squeue, max_send_sge, is_llqp);
+ if (HAS_RQ(my_qp))
+ ehca_determine_small_queue(
+ &parms.rqueue, max_recv_sge, is_llqp);
parms.qp_storage =
(parms.squeue.is_small || parms.rqueue.is_small);
}
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index a090c67..29bd476 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -172,7 +172,7 @@ static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
unsigned long bit;
int free_page = 0;
- bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
+ bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
>> (order + 9);
mutex_lock(&pd->lock);
--
1.5.2
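
For reviewers unfamiliar with the small-queue layout touched by the ipz_pt_fn.c
hunk above: several small queues share one kernel page, and the bit that marks
a queue's slot in the page bitmap is derived from the queue's byte offset inside
that page divided by the slot size (512 << order). The standalone sketch below
is only an illustration, not driver code -- PAGE_SHIFT of 12 and the sample
address are assumptions. It shows why masking with PAGE_MASK instead of
~PAGE_MASK picks up the page-frame bits and thus the wrong slot.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * Bit index of a small-queue slot within its page's allocation bitmap:
 * keep only the offset inside the page (~PAGE_MASK keeps the low bits)
 * and divide by the slot size, 512 << order bytes.
 */
static unsigned long small_queue_bit(unsigned long qaddr, int order)
{
	return (qaddr & ~PAGE_MASK) >> (order + 9);
}

int main(void)
{
	/* third 512-byte slot of some page */
	unsigned long qaddr = (42UL << PAGE_SHIFT) + 3 * 512;

	printf("correct bit:    %lu\n", small_queue_bit(qaddr, 0));	/* 3 */
	printf("with PAGE_MASK: %lu\n",
	       (qaddr & PAGE_MASK) >> 9);	/* 336 -- page-frame bits, not a slot */
	return 0;
}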
a) Report max_srq > 0 if SRQ is supported
b) Report "last wqe reached" event when base QP dies
Signed-off-by: Joachim Fenkes <[email protected]>
---
drivers/infiniband/hw/ehca/ehca_hca.c | 10 +++++--
drivers/infiniband/hw/ehca/ehca_irq.c | 48 +++++++++++++++++++++-----------
2 files changed, 38 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index fc19ef9..cf22472 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -93,9 +93,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
- props->max_srq = 0;
- props->max_srq_wr = 0;
- props->max_srq_sge = 0;
+
+ if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+ props->max_srq = props->max_qp;
+ props->max_srq_wr = props->max_qp_wr;
+ props->max_srq_sge = 3;
+ }
+
props->max_pkeys = 16;
props->local_ca_ack_delay
= rblock->local_ca_ack_delay;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ee06d8b..a925ea5 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -175,41 +175,55 @@ error_data1:
}
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
- enum ib_event_type event_type, int fatal)
+static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
+ enum ib_event_type event_type)
{
struct ib_event event;
- struct ehca_qp *qp;
- u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
- read_lock(&ehca_qp_idr_lock);
- qp = idr_find(&ehca_qp_idr, token);
- read_unlock(&ehca_qp_idr_lock);
-
-
- if (!qp)
- return;
-
- if (fatal)
- ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
event.device = &shca->ib_device;
+ event.event = event_type;
if (qp->ext_type == EQPT_SRQ) {
if (!qp->ib_srq.event_handler)
return;
- event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
event.element.srq = &qp->ib_srq;
qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
} else {
if (!qp->ib_qp.event_handler)
return;
- event.event = event_type;
event.element.qp = &qp->ib_qp;
qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
}
+}
+
+static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
+ enum ib_event_type event_type, int fatal)
+{
+ struct ehca_qp *qp;
+ u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+ read_lock(&ehca_qp_idr_lock);
+ qp = idr_find(&ehca_qp_idr, token);
+ read_unlock(&ehca_qp_idr_lock);
+
+ if (!qp)
+ return;
+
+ if (fatal)
+ ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+ dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
+ IB_EVENT_SRQ_ERR : event_type);
+
+ /*
+ * eHCA only processes one WQE at a time for SRQ base QPs,
+ * so the last WQE has been processed as soon as the QP enters
+ * error state.
+ */
+ if (fatal && qp->ext_type == EQPT_SRQBASE)
+ dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
return;
}
--
1.5.2
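
For context on how a consumer such as IPoIB CM relies on the two changes above:
a kernel ULP only takes its SRQ path if the device reports max_srq > 0, and once
a QP attached to an SRQ goes into error it must see the "last WQE reached" event
before it may reclaim the receive WQEs that QP still owns on the SRQ. The
fragment below is an illustrative consumer-side sketch, not ehca or IPoIB code;
the names use_srq and my_qp_event_handler are made up, and it assumes the
in-kernel verbs API as of 2.6.23.

#include <rdma/ib_verbs.h>

/* Decide at startup whether the SRQ path can be used at all. */
static int use_srq(struct ib_device *device)
{
	struct ib_device_attr attr;

	if (ib_query_device(device, &attr))
		return 0;

	return attr.max_srq > 0;	/* ehca now reports this when HCA_CAP_SRQ is set */
}

/*
 * QP event handler: with an SRQ, the "last WQE reached" event tells the
 * consumer that no further completions will arrive for this QP, so the
 * receive WQEs it still owns on the SRQ can be reclaimed.
 */
static void my_qp_event_handler(struct ib_event *event, void *context)
{
	switch (event->event) {
	case IB_EVENT_QP_LAST_WQE_REACHED:
		/* safe to start draining/reposting this QP's SRQ WQEs */
		break;
	default:
		break;
	}
}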