Most DPAA1 devices do not support a soft reset, which is a problem when
kexec starts a new kernel. This patch series allows kexec to function
by detecting that the QBMan device was previously initialized.
The patches fix some issues with device cleanup and also ensure
that the location of the QBMan private memories has not changed
after kexec.
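For reference, the whole series hinges on the fact that QBMan latches
its private memory base address registers across a soft restart, so a
new kernel can detect a live device by reading them back. A minimal
sketch of that check (assuming the bm_ccsr_in() accessor and the
REG_FBPR_BAR/REG_FBPR_BARE offsets used in patch 1; the helper name is
hypothetical):

/*
 * Sketch only: a non-zero FBPR BAR means a previous kernel already
 * programmed the BMan. Accessor and register names are the ones
 * used in bman_ccsr.c.
 */
static int bman_detect_prior_init(u64 ba)
{
	u32 bar = bm_ccsr_in(REG_FBPR_BAR);
	u32 bare = bm_ccsr_in(REG_FBPR_BARE);

	if (!bar)
		return 0;	/* fresh device: full init required */
	if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba))
		return -ENOMEM;	/* private memory moved: cannot recover */
	return 1;		/* live device: skip init, clean up later */
}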
Changes since v1:
- Removed a bug fix and sent it separately to ease backporting
Roy Pledge (7):
soc/fsl/qbman: Rework QBMan private memory setup
soc/fsl/qbman: Cleanup buffer pools if BMan was initialized prior to
bootup
soc/fsl/qbman: Cleanup QMan queues if device was already initialized
soc/fsl/qbman: Fix drain_mr_fqrni()
soc/fsl/qbman: Disable interrupts during portal recovery
soc/fsl/qbman: Fixup qman_shutdown_fq()
soc/fsl/qbman: Update device tree with reserved memory
drivers/soc/fsl/qbman/bman.c | 17 ++++----
drivers/soc/fsl/qbman/bman_ccsr.c | 36 +++++++++++++++-
drivers/soc/fsl/qbman/bman_portal.c | 18 +++++++-
drivers/soc/fsl/qbman/bman_priv.h | 5 +++
drivers/soc/fsl/qbman/dpaa_sys.c | 63 ++++++++++++++++------------
drivers/soc/fsl/qbman/qman.c | 83 +++++++++++++++++++++++++++++--------
drivers/soc/fsl/qbman/qman_ccsr.c | 59 +++++++++++++++++++++++---
drivers/soc/fsl/qbman/qman_portal.c | 18 +++++++-
drivers/soc/fsl/qbman/qman_priv.h | 8 ++++
9 files changed, 246 insertions(+), 61 deletions(-)
--
2.7.4
Rework QBMan private memory setup so that the areas are not
zeroed if the device was previously initialized.
If the QMan private memory was already initialized, skip the PFDR
initialization.
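The reworked setters use a tri-state return value that callers key off
to decide whether seeding is still required; a sketch of the intended
caller pattern (this mirrors the qman_init_ccsr() hunk below):

/*
 * Return convention introduced by this patch:
 *   < 0 -> error (e.g. private memory moved across a kexec)
 *     0 -> BAR freshly programmed, memory zeroed: seed the PFDRs
 *     1 -> BAR already programmed by a prior kernel: skip seeding
 */
err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
if (err < 0)
	return err;
if (err == 0) {
	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
	if (err)
		return err;
}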
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/bman_ccsr.c | 26 ++++++++++++++++++++--
drivers/soc/fsl/qbman/dpaa_sys.c | 7 +++---
drivers/soc/fsl/qbman/qman_ccsr.c | 45 ++++++++++++++++++++++++++++++++++-----
3 files changed, 67 insertions(+), 11 deletions(-)
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 7c3cc96..dc6d7e5 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -97,17 +97,40 @@ static void bm_get_version(u16 *id, u8 *major, u8 *minor)
/* signal transactions for FBPRs with higher priority */
#define FBPR_AR_RPRIO_HI BIT(30)
-static void bm_set_memory(u64 ba, u32 size)
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+static int bm_set_memory(u64 ba, u32 size)
{
+ u32 bar, bare;
u32 exp = ilog2(size);
/* choke if size isn't within range */
DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
is_power_of_2(size));
/* choke if '[e]ba' has lower-alignment than 'size' */
DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if BMan has already been initialized */
+ bar = bm_ccsr_in(REG_FBPR_BAR);
+ if (bar) {
+ /* Make sure ba matches what was programmed */
+ bare = bm_ccsr_in(REG_FBPR_BARE);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ pr_info("BMan BAR already configured\n");
+ __bman_requires_cleanup = 1;
+ return 1;
+ }
+
bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
bm_ccsr_out(REG_FBPR_AR, exp - 1);
+ return 0;
}
/*
@@ -120,7 +143,6 @@ static void bm_set_memory(u64 ba, u32 size)
*/
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
-static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem)
{
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index e6d48dc..3e0a7f3 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -40,6 +40,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
int ret;
struct device_node *mem_node;
u64 size64;
+ struct reserved_mem *rmem;
ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
if (ret) {
@@ -62,10 +63,8 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
return -ENODEV;
}
- if (!dma_alloc_coherent(dev, *size, addr, 0)) {
- dev_err(dev, "DMA Alloc memory failed\n");
- return -ENODEV;
- }
+ rmem = of_reserved_mem_lookup(mem_node);
+ *addr = rmem->base;
/*
* Disassociate the reserved memory area from the device
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index a6bb430..39f6fc1 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -274,6 +274,7 @@ static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
static int __qman_probed;
+static int __qman_requires_cleanup;
static inline u32 qm_ccsr_in(u32 offset)
{
@@ -340,19 +341,46 @@ static void qm_get_version(u16 *id, u8 *major, u8 *minor)
}
#define PFDR_AR_EN BIT(31)
-static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
+ void *ptr;
u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
u32 exp = ilog2(size);
+ u32 bar, bare;
/* choke if size isn't within range */
DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
is_power_of_2(size));
/* choke if 'ba' has lower-alignment than 'size' */
DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if QMan has already been initialized */
+ bar = qm_ccsr_in(offset + REG_offset_BAR);
+ if (bar) {
+ /* Make sure ba matches what was programmed */
+ bare = qm_ccsr_in(offset);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ __qman_requires_cleanup = 1;
+ /* Return 1 to indicate memory was previously programmed */
+ return 1;
+ }
+ /* Need to temporarily map the area to make sure it is zeroed */
+ ptr = memremap(ba, size, MEMREMAP_WB);
+ if (!ptr) {
+ pr_crit("memremap() of QMan private memory failed\n");
+ return -ENOMEM;
+ }
+ memset(ptr, 0, size);
+ memunmap(ptr);
+
qm_ccsr_out(offset, upper_32_bits(ba));
qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+ return 0;
}
static void qm_set_pfdr_threshold(u32 th, u8 k)
@@ -571,12 +599,19 @@ static int qman_init_ccsr(struct device *dev)
int i, err;
/* FQD memory */
- qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ if (err < 0)
+ return err;
/* PFDR memory */
- qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
- err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
- if (err)
+ err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ if (err < 0)
return err;
+ /* Only initialize PFDRs if the QMan was not initialized before */
+ if (err == 0) {
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ }
/* thresholds */
qm_set_pfdr_threshold(512, 64);
qm_set_sfdr_threshold(128);
--
2.7.4
Clean the BMan buffer pools if the device had been initialized
previously. This ensures a consistent state if the kernel was
soft restarted (kexec, for example).
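The per-pool cleanup is a simple drain loop: issue single-buffer
ACQUIRE management commands until the response reports an empty pool.
A condensed sketch (bm_mc_start()/bm_mc_commit()/bm_mc_result_timeout()
and the verb flags are the helpers already in bman.c):

/* Sketch: drain one buffer pool down to its reset (empty) state. */
while (1) {
	bm_cmd = bm_mc_start(&p->p);
	bm_cmd->bpid = bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
	if (!bm_mc_result_timeout(&p->p, &bm_res))
		return -ETIMEDOUT;	/* management command timed out */
	if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT))
		break;			/* zero buffers left: pool empty */
}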
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/bman.c | 17 +++++++++--------
drivers/soc/fsl/qbman/bman_ccsr.c | 10 ++++++++++
drivers/soc/fsl/qbman/bman_portal.c | 18 +++++++++++++++++-
drivers/soc/fsl/qbman/bman_priv.h | 5 +++++
4 files changed, 41 insertions(+), 9 deletions(-)
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index f84ab59..f4fb527 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -635,30 +635,31 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
return 0;
}
-static int bm_shutdown_pool(u32 bpid)
+int bm_shutdown_pool(u32 bpid)
{
+ int err = 0;
struct bm_mc_command *bm_cmd;
union bm_mc_result *bm_res;
+
+ struct bman_portal *p = get_affine_portal();
while (1) {
- struct bman_portal *p = get_affine_portal();
/* Acquire buffers until empty */
bm_cmd = bm_mc_start(&p->p);
bm_cmd->bpid = bpid;
bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
if (!bm_mc_result_timeout(&p->p, &bm_res)) {
- put_affine_portal();
pr_crit("BMan Acquire Command timedout\n");
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto done;
}
if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
- put_affine_portal();
/* Pool is empty */
- return 0;
+ goto done;
}
- put_affine_portal();
}
-
+done:
+ put_affine_portal();
- return 0;
+ return err;
}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index dc6d7e5..cb24a08 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -195,6 +195,16 @@ int bman_is_probed(void)
}
EXPORT_SYMBOL_GPL(bman_is_probed);
+int bman_requires_cleanup(void)
+{
+ return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+ __bman_requires_cleanup = 0;
+}
+
static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index c78cc69..cc06d95 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -100,7 +100,7 @@ static int bman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct bm_portal_config *pcfg;
struct resource *addr_phys[2];
- int irq, cpu, err;
+ int irq, cpu, err, i;
err = bman_is_probed();
if (!err)
@@ -181,6 +181,22 @@ static int bman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
bman_offline_cpu(cpu);
+ if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+ /*
+ * BMan wasn't reset prior to boot (kexec, for example).
+ * Empty all the buffer pools so they are in the reset state.
+ */
+ for (i = 0; i < BM_POOL_MAX; i++) {
+ err = bm_shutdown_pool(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown bpool %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ bman_done_cleanup();
+ }
+
return 0;
err_portal_init:
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
index 751ce90..aa3981e 100644
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -76,3 +76,8 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
--
2.7.4
If the QMan device was previously initialized, make sure all the
frame queues are out of service once all the portals are probed.
This handles the case where the kernel is restarted without the
SoC being reset (kexec, for example).
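The walk is bounded by the size of the FQD private memory, since each
frame queue descriptor occupies 64 bytes; a sketch of the recovery loop
that runs after the first portal is probed (identifiers as in the hunks
below):

/* Sketch: take every possible FQID out of service exactly once. */
for (i = 0; i < qm_get_fqid_maxcnt(); i++) {	/* fqd_sz / 64 FQs */
	err = qman_shutdown_fq(i);
	if (err)
		return err;
}
qman_done_cleanup();		/* clear the cleanup-required flag */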
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/qman.c | 4 ++--
drivers/soc/fsl/qbman/qman_ccsr.c | 13 ++++++++++++-
drivers/soc/fsl/qbman/qman_portal.c | 18 +++++++++++++++++-
drivers/soc/fsl/qbman/qman_priv.h | 7 +++++++
4 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 636f83f..f10f77d 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2581,7 +2581,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
#define qm_dqrr_drain_nomatch(p) \
_qm_dqrr_consume_and_match(p, 0, 0, false)
-static int qman_shutdown_fq(u32 fqid)
+int qman_shutdown_fq(u32 fqid)
{
struct qman_portal *p;
struct device *dev;
@@ -2754,7 +2754,7 @@ static int qman_shutdown_fq(u32 fqid)
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
QM_MCR_VERB_ALTER_OOS);
- if (mcr->result) {
+ if (mcr->result != QM_MCR_RESULT_OK) {
dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
fqid, mcr->result);
ret = -EIO;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 39f6fc1..fcf77e0 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -483,7 +483,7 @@ RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
#endif
-static unsigned int qm_get_fqid_maxcnt(void)
+unsigned int qm_get_fqid_maxcnt(void)
{
return fqd_sz / 64;
}
@@ -728,6 +728,17 @@ int qman_is_probed(void)
}
EXPORT_SYMBOL_GPL(qman_is_probed);
+int qman_requires_cleanup(void)
+{
+ return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+ __qman_requires_cleanup = 0;
+}
+
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 75717bc..153727c 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -233,7 +233,7 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- int irq, cpu, err;
+ int irq, cpu, err, i;
u32 val;
err = qman_is_probed();
@@ -328,6 +328,22 @@ static int qman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
qman_offline_cpu(cpu);
+ if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
+ /*
+ * QMan wasn't reset prior to boot (kexec, for example).
+ * Empty all the frame queues so they are in the reset state.
+ */
+ for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
+ err = qman_shutdown_fq(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown frame queue %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ qman_done_cleanup();
+ }
+
return 0;
err_portal_init:
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 0451571..a8a35fe 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -272,3 +272,10 @@ extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
+
+unsigned int qm_get_fqid_maxcnt(void);
+
+int qman_shutdown_fq(u32 fqid);
+
+int qman_requires_cleanup(void);
+void qman_done_cleanup(void);
--
2.7.4
When using a reserved memory node in the device tree there are
two options: dynamic or static. If a dynamic allocation was
selected (where the kernel chooses the address of the allocation),
convert it to a static allocation by inserting the reg property.
This ensures the same memory is reused after a kexec.
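The inserted reg property uses the standard 2-cell address / 2-cell
size big-endian encoding; a sketch of the packing, with a made-up base
and size purely for illustration:

/* Sketch: "reg" = <addr-hi addr-lo size-hi size-lo>; the base and
 * size values below are hypothetical. */
__be32 reg[4];
u64 base = 0x420000000ULL;	/* hypothetical reserved base  */
u64 size = 0x2000000ULL;	/* hypothetical 32 MiB region  */

reg[0] = cpu_to_be32(upper_32_bits(base));
reg[1] = cpu_to_be32(lower_32_bits(base));
reg[2] = cpu_to_be32(upper_32_bits(size));
reg[3] = cpu_to_be32(lower_32_bits(size));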
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/dpaa_sys.c | 60 ++++++++++++++++++++++++----------------
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 3e0a7f3..9dd8bb5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -37,41 +37,53 @@
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
size_t *size)
{
- int ret;
struct device_node *mem_node;
- u64 size64;
struct reserved_mem *rmem;
+ struct property *prop;
+ int len, err;
+ __be32 *res_array;
- ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
- if (ret) {
- dev_err(dev,
- "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
- idx, ret);
- return -ENODEV;
- }
- mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (mem_node) {
- ret = of_property_read_u64(mem_node, "size", &size64);
- if (ret) {
- dev_err(dev, "of_address_to_resource fails 0x%x\n",
- ret);
- return -ENODEV;
- }
- *size = size64;
- } else {
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!mem_node) {
dev_err(dev, "No memory-region found for index %d\n", idx);
return -ENODEV;
}
rmem = of_reserved_mem_lookup(mem_node);
+ if (!rmem) {
+ dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
+ return -ENODEV;
+ }
*addr = rmem->base;
+ *size = rmem->size;
/*
- * Disassociate the reserved memory area from the device
- * because a device can only have one DMA memory area. This
- * should be fine since the memory is allocated and initialized
- * and only ever accessed by the QBMan device from now on
+ * Check if the reg property exists - if not, insert it so that
+ * upon kexec() the same memory region address will be preserved.
+ * This is needed because QBMan HW does not allow the base address/
+ * size to be modified once set.
*/
- of_reserved_mem_device_release(dev);
+ prop = of_find_property(mem_node, "reg", &len);
+ if (!prop) {
+ prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+ prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
+ GFP_KERNEL);
+ if (!prop->value)
+ return -ENOMEM;
+ res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+ res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+ res_array[2] = cpu_to_be32(upper_32_bits(*size));
+ res_array[3] = cpu_to_be32(lower_32_bits(*size));
+ prop->length = sizeof(__be32) * 4;
+ prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+ if (!prop->name)
+ return -ENOMEM;
+ err = of_add_property(mem_node, prop);
+ if (err)
+ return err;
+ }
+
return 0;
}
--
2.7.4
The drain_mr_fqrni() function may be called from uninterruptible
context, so convert the msleep() to an mdelay(). Also ensure that
the valid bit is updated while polling.
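The context matters because the portal recovery path can reach this
function with interrupts disabled, where sleeping is illegal; the fix
busy-waits and re-reads the valid bit so a late FQRNI entry is seen.
Sketch of the new polling step (identifiers as in the hunk below):

mdelay(1);		/* busy-wait ~1 ms; msleep() may not sleep here */
qm_mr_pvb_update(p);	/* refresh the message ring's valid-bit view   */
msg = qm_mr_current(p);	/* now a late FQRNI entry will be observed     */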
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/qman.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index f10f77d..2989504 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1164,6 +1164,7 @@ static int drain_mr_fqrni(struct qm_portal *p)
{
const union qm_mr_entry *msg;
loop:
+ qm_mr_pvb_update(p);
msg = qm_mr_current(p);
if (!msg) {
/*
@@ -1180,7 +1181,8 @@ static int drain_mr_fqrni(struct qm_portal *p)
* entries well before the ring has been fully consumed, so
* we're being *really* paranoid here.
*/
- msleep(1);
+ mdelay(1);
+ qm_mr_pvb_update(p);
msg = qm_mr_current(p);
if (!msg)
return 0;
--
2.7.4
When shutting down an FQ on a dedicated channel, only the SW
portal associated with that channel can dequeue from it.
Make sure the correct portal is used.
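Dedicated channels (those numbered below qm_channel_pool1) are owned
by exactly one software portal, so the shutdown path has to locate that
owner rather than use whatever portal is affine to the current CPU; a
sketch of the selection (mirroring the hunk below):

/* Sketch: pick the portal allowed to dequeue from the FQ's channel. */
if (channel < qm_channel_pool1) {
	/* dedicated channel: only its owning portal may dequeue */
	channel_portal = get_portal_for_channel(channel);
	if (!channel_portal)
		return -EIO;	/* no portal claims this channel */
} else {
	/* pool channel: any portal can drain it */
	channel_portal = p;
}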
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/qman.c | 53 +++++++++++++++++++++++++++++++++++---------
1 file changed, 42 insertions(+), 11 deletions(-)
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 4a99ce5..bf68d86 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1018,6 +1018,20 @@ static inline void put_affine_portal(void)
put_cpu_var(qman_affine_portal);
}
+
+static inline struct qman_portal *get_portal_for_channel(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i] &&
+ affine_portals[i]->config->channel == channel)
+ return affine_portals[i];
+ }
+
+ return NULL;
+}
+
static struct workqueue_struct *qm_portal_wq;
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
@@ -2601,7 +2615,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
int qman_shutdown_fq(u32 fqid)
{
- struct qman_portal *p;
+ struct qman_portal *p, *channel_portal;
struct device *dev;
union qm_mc_command *mcc;
union qm_mc_result *mcr;
@@ -2641,17 +2655,28 @@ int qman_shutdown_fq(u32 fqid)
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+ if (channel < qm_channel_pool1) {
+ channel_portal = get_portal_for_channel(channel);
+ if (channel_portal == NULL) {
+ dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
+ channel);
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ channel_portal = p;
+ }
+
switch (state) {
case QM_MCR_NP_STATE_TEN_SCHED:
case QM_MCR_NP_STATE_TRU_SCHED:
case QM_MCR_NP_STATE_ACTIVE:
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
- mcc = qm_mc_start(&p->p);
+ mcc = qm_mc_start(&channel_portal->p);
qm_fqid_set(&mcc->fq, fqid);
- qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
- if (!qm_mc_result_timeout(&p->p, &mcr)) {
- dev_err(dev, "QUERYFQ_NP timeout\n");
+ qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
+ dev_err(dev, "ALTER_RETIRE timeout\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -2659,6 +2684,9 @@ int qman_shutdown_fq(u32 fqid)
QM_MCR_VERB_ALTER_RETIRE);
res = mcr->result; /* Make a copy as we reuse MCR below */
+ if (res == QM_MCR_RESULT_OK)
+ drain_mr_fqrni(&channel_portal->p);
+
if (res == QM_MCR_RESULT_PENDING) {
/*
* Need to wait for the FQRN in the message ring, which
@@ -2688,21 +2716,25 @@ int qman_shutdown_fq(u32 fqid)
}
/* Set the sdqcr to drain this channel */
if (channel < qm_channel_pool1)
- qm_dqrr_sdqcr_set(&p->p,
+ qm_dqrr_sdqcr_set(&channel_portal->p,
QM_SDQCR_TYPE_ACTIVE |
QM_SDQCR_CHANNELS_DEDICATED);
else
- qm_dqrr_sdqcr_set(&p->p,
+ qm_dqrr_sdqcr_set(&channel_portal->p,
QM_SDQCR_TYPE_ACTIVE |
QM_SDQCR_CHANNELS_POOL_CONV
(channel));
do {
/* Keep draining DQRR while checking the MR*/
- qm_dqrr_drain_nomatch(&p->p);
+ qm_dqrr_drain_nomatch(&channel_portal->p);
/* Process message ring too */
- found_fqrn = qm_mr_drain(&p->p, FQRN);
+ found_fqrn = qm_mr_drain(&channel_portal->p,
+ FQRN);
cpu_relax();
} while (!found_fqrn);
+ /* Restore SDQCR */
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ channel_portal->sdqcr);
}
if (res != QM_MCR_RESULT_OK &&
@@ -2733,9 +2765,8 @@ int qman_shutdown_fq(u32 fqid)
* Wait for a dequeue and process the dequeues,
* making sure to empty the ring completely
*/
- } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
}
- qm_dqrr_sdqcr_set(&p->p, 0);
while (!orl_empty) {
/* Wait for the ORL to have been completely drained */
--
2.7.4
Disable the QBMan portal interrupts while recovery is in progress
and re-enable them once cleanup is complete.
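Re-arming happens once per affine portal when cleanup finishes:
acknowledge any stale status, then lift the inhibit. A sketch of the
per-portal step performed by qman_enable_irqs() below:

qm_out(&portal->p, QM_REG_ISR, 0xffffffff);	/* ack stale status  */
qm_out(&portal->p, QM_REG_IIR, 0);		/* lift IRQ inhibit  */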
Signed-off-by: Roy Pledge <[email protected]>
---
drivers/soc/fsl/qbman/qman.c | 22 +++++++++++++++++++---
drivers/soc/fsl/qbman/qman_ccsr.c | 1 +
drivers/soc/fsl/qbman/qman_priv.h | 1 +
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 2989504..4a99ce5 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1070,6 +1070,20 @@ int qman_wq_alloc(void)
return 0;
}
+
+void qman_enable_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i]) {
+ qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
+ qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
+ }
+ }
+}
+
/*
* This is what everything can wait on, even if it migrates to a different cpu
* to the one whose affine portal it is waiting on.
@@ -1269,8 +1283,8 @@ static int qman_create_portal(struct qman_portal *portal,
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
qm_out(p, QM_REG_IER, 0);
- qm_out(p, QM_REG_ISR, 0xffffffff);
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
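+ /* Keep the portal inhibited until any kexec cleanup completes */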
+ qm_out(p, QM_REG_IIR, 1);
if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
dev_err(c->dev, "request_irq() failed\n");
goto fail_irq;
@@ -1290,7 +1304,7 @@ static int qman_create_portal(struct qman_portal *portal,
isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
qm_out(p, QM_REG_ISDR, isdr);
if (qm_dqrr_current(p)) {
- dev_err(c->dev, "DQRR unclean\n");
+ dev_dbg(c->dev, "DQRR unclean\n");
qm_dqrr_cdc_consume_n(p, 0xffff);
}
if (qm_mr_current(p) && drain_mr_fqrni(p)) {
@@ -1303,8 +1317,10 @@ static int qman_create_portal(struct qman_portal *portal,
}
/* Success */
portal->config = c;
+ qm_out(p, QM_REG_ISR, 0xffffffff);
qm_out(p, QM_REG_ISDR, 0);
- qm_out(p, QM_REG_IIR, 0);
+ if (!qman_requires_cleanup())
+ qm_out(p, QM_REG_IIR, 0);
/* Write a sane SDQCR */
qm_dqrr_sdqcr_set(p, portal->sdqcr);
return 0;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index fcf77e0..8d17643 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -735,6 +735,7 @@ int qman_requires_cleanup(void)
void qman_done_cleanup(void)
{
+ qman_enable_irqs();
__qman_requires_cleanup = 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index a8a35fe..fd1cf54 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -279,3 +279,4 @@ int qman_shutdown_fq(u32 fqid);
int qman_requires_cleanup(void);
void qman_done_cleanup(void);
+void qman_enable_irqs(void);
--
2.7.4