This series fixes some issues in the rockchip IOMMU driver, and adds
OF_IOMMU support to it.
Changes in v2:
Move irq request to probe() (in patch [0])
Move bus_set_iommu() to rk_iommu_probe().
Jeffy Chen (9):
iommu/rockchip: Request irqs in rk_iommu_probe()
iommu/rockchip: Suppress unbinding
iommu/rockchip: Fix error handling in probe
iommu/rockchip: Fix error handling in init
iommu/rockchip: Use iommu_group_get_for_dev() for add_device
iommu/rockchip: Use IOMMU device for dma mapping operations
iommu/rockchip: Use OF_IOMMU to attach devices automatically
iommu/rockchip: Add runtime PM support
iommu/rockchip: Support sharing IOMMU between masters
Tomasz Figa (4):
iommu/rockchip: Fix error handling in attach
iommu/rockchip: Use iopoll helpers to wait for hardware
iommu/rockchip: Fix TLB flush of secondary IOMMUs
iommu/rockchip: Control clocks needed to access the IOMMU
.../devicetree/bindings/iommu/rockchip,iommu.txt | 8 +
drivers/iommu/rockchip-iommu.c | 654 ++++++++++++---------
2 files changed, 392 insertions(+), 270 deletions(-)
--
2.11.0
From: Tomasz Figa <[email protected]>
Currently, if the driver encounters an error while attaching a device, it
will leave the IOMMU in an inconsistent state. Even though this shouldn't
really happen in practice, let's add a proper error path to keep things
consistent.
Signed-off-by: Tomasz Figa <[email protected]>
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2:
Move irq request to probe() (in patch [0])
drivers/iommu/rockchip-iommu.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index da4afe016a4e..4ffb3a65c9d2 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -826,7 +826,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
ret = rk_iommu_force_reset(iommu);
if (ret)
- return ret;
+ goto err_disable_stall;
iommu->domain = domain;
@@ -839,7 +839,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
ret = rk_iommu_enable_paging(iommu);
if (ret)
- return ret;
+ goto err_disable_stall;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_add_tail(&iommu->node, &rk_domain->iommus);
@@ -850,6 +850,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
rk_iommu_disable_stall(iommu);
return 0;
+
+err_disable_stall:
+ rk_iommu_disable_stall(iommu);
+
+ return ret;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
--
2.11.0
From: Tomasz Figa <[email protected]>
This patch converts the rockchip-iommu driver to use the in-kernel
iopoll helpers to wait for certain status bits to change in registers
instead of an open-coded custom macro.
Signed-off-by: Tomasz Figa <[email protected]>
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 68 ++++++++++++++++++++----------------------
1 file changed, 32 insertions(+), 36 deletions(-)
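For reference, readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)
from <linux/iopoll.h> behaves roughly like the sketch below. This is only an
illustration of the "wait for stall active" case, using a hypothetical
wait_for_stall_sketch() helper and assuming the surrounding driver context
(struct rk_iommu, POLL_TIMEOUT, etc.); the real macro is generic over op/cond
and, like the removed rk_wait_for(), re-reads the value once more after the
timeout expires:

static int wait_for_stall_sketch(struct rk_iommu *iommu)
{
	ktime_t timeout = ktime_add_us(ktime_get(), POLL_TIMEOUT);
	bool val;

	for (;;) {
		val = rk_iommu_is_stall_active(iommu);
		if (val)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			/* Last-chance read, in case we were preempted. */
			val = rk_iommu_is_stall_active(iommu);
			break;
		}
		/* sleep_us == 100: sleep between sleep_us / 4 and sleep_us. */
		usleep_range((100 >> 2) + 1, 100);
	}

	return val ? 0 : -ETIMEDOUT;
}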
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index d2a0b0daf40d..8179306d464a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -13,7 +13,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/jiffies.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -37,7 +37,8 @@
#define RK_MMU_AUTO_GATING 0x24
#define DTE_ADDR_DUMMY 0xCAFEBABE
-#define FORCE_RESET_TIMEOUT 100 /* ms */
+#define FORCE_RESET_TIMEOUT 100000 /* us */
+#define POLL_TIMEOUT 1000 /* us */
/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
@@ -74,8 +75,6 @@
*/
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
-#define IOMMU_REG_POLL_COUNT_FAST 1000
-
struct rk_iommu_domain {
struct list_head iommus;
struct platform_device *pdev;
@@ -111,27 +110,6 @@ static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
return container_of(dom, struct rk_iommu_domain, domain);
}
-/**
- * Inspired by _wait_for in intel_drv.h
- * This is NOT safe for use in interrupt context.
- *
- * Note that it's important that we check the condition again after having
- * timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
- */
-#define rk_wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = (COND) ? 0 : -ETIMEDOUT; \
- break; \
- } \
- usleep_range(50, 100); \
- } \
- ret__; \
-})
-
/*
* The Rockchip rk3288 iommu uses a 2-level page table.
* The first level is the "Directory Table" (DT).
@@ -335,9 +313,21 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
return enable;
}
+static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
+{
+ bool done = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
+
+ return done;
+}
+
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (rk_iommu_is_stall_active(iommu))
return 0;
@@ -348,7 +338,8 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
- ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+ val, 100, POLL_TIMEOUT);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
@@ -360,13 +351,15 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (!rk_iommu_is_stall_active(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
- ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+ !val, 100, POLL_TIMEOUT);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
@@ -378,13 +371,15 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
- ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+ val, 100, POLL_TIMEOUT);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
@@ -396,13 +391,15 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
- ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+ !val, 100, POLL_TIMEOUT);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
@@ -415,6 +412,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
int ret, i;
u32 dte_addr;
+ bool val;
if (iommu->reset_disabled)
return 0;
@@ -435,13 +433,11 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
- for (i = 0; i < iommu->num_mmu; i++) {
- ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
- FORCE_RESET_TIMEOUT);
- if (ret) {
- dev_err(iommu->dev, "FORCE_RESET command timed out\n");
- return ret;
- }
+ ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
+ val, 100, FORCE_RESET_TIMEOUT);
+ if (ret) {
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ return ret;
}
return 0;
--
2.11.0
From: Tomasz Figa <[email protected]>
Due to a bug in the current code, only the first IOMMU has its TLB lines
flushed in rk_iommu_zap_lines(): the inner loop consumes the shared iova
variable, so for every IOMMU after the first one it already equals iova_end
and no flush is issued. This patch fixes the inner loop to execute for all
IOMMUs and properly flush the TLB.
Signed-off-by: Tomasz Figa <[email protected]>
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
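To make the loop-variable reuse obvious, here is a stand-alone toy example
(not driver code) that mimics the old loop structure; it only ever prints
"zap" lines for i == 0, because iova is never reset for the second mmu:

#include <stdio.h>

int main(void)
{
	unsigned long iova = 0, iova_end = 3;
	int i;

	/* Same shape as the old code: the inner loop consumes iova. */
	for (i = 0; i < 2; i++)
		for (; iova < iova_end; iova++)
			printf("mmu %d: zap %lu\n", i, iova);

	return 0;
}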
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 8179306d464a..52c6ff8602d8 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -274,19 +274,21 @@ static void rk_iommu_base_command(void __iomem *base, u32 command)
{
writel(command, base + RK_MMU_COMMAND);
}
-static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
size_t size)
{
int i;
-
- dma_addr_t iova_end = iova + size;
+ dma_addr_t iova_end = iova_start + size;
/*
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
* entire iotlb rather than iterate over individual iovas.
*/
- for (i = 0; i < iommu->num_mmu; i++)
- for (; iova < iova_end; iova += SPAGE_SIZE)
+ for (i = 0; i < iommu->num_mmu; i++) {
+ dma_addr_t iova;
+
+ for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
+ }
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
--
2.11.0
From: Tomasz Figa <[email protected]>
The current code relies on the master driver enabling the necessary clocks
before the IOMMU is accessed. However, there are cases when the IOMMU needs
to be accessed while the master is not running yet, for example when
allocating V4L2 videobuf2 buffers, which is done by the VB2 framework using
the DMA mapping API and does not engage the master driver at all.

This patch fixes the problem by allowing the clocks needed for IOMMU
operation to be listed in the Device Tree and making the driver enable them
for the duration of hardware access.
Signed-off-by: Jeffy Chen <[email protected]>
Signed-off-by: Tomasz Figa <[email protected]>
---
Changes in v2: None
.../devicetree/bindings/iommu/rockchip,iommu.txt | 8 ++
drivers/iommu/rockchip-iommu.c | 115 +++++++++++++++++++--
2 files changed, 117 insertions(+), 6 deletions(-)
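The pattern this patch establishes is simply to bracket every hardware access
with the new helpers. Roughly (illustrative sketch only, using a hypothetical
rk_iommu_zap_iova_sketch() and assuming the driver context):

static void rk_iommu_zap_iova_sketch(struct rk_iommu *iommu,
				     dma_addr_t iova, size_t size)
{
	/* Clocks must be running for the duration of the register writes. */
	if (WARN_ON(rk_iommu_enable_clocks(iommu)))
		return;

	rk_iommu_zap_lines(iommu, iova, size);

	rk_iommu_disable_clocks(iommu);
}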
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
index 2098f7732264..33dd853359fa 100644
--- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -14,6 +14,13 @@ Required properties:
"single-master" device, and needs no additional information
to associate with its master device. See:
Documentation/devicetree/bindings/iommu/iommu.txt
+Optional properties:
+- clocks : A list of master clocks required for the IOMMU to be accessible
+ by the host CPU. The number of clocks depends on the master
+ block and might as well be zero. See [1] for generic clock
+ bindings description.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
Optional properties:
- rockchip,disable-mmu-reset : Don't use the mmu reset operation.
@@ -27,5 +34,6 @@ Example:
reg = <0xff940300 0x100>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
+ clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>;
#iommu-cells = <0>;
};
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 52c6ff8602d8..a7442d59ca43 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,6 +4,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -90,6 +91,8 @@ struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
+ struct clk **clocks;
+ int num_clocks;
int *irq;
bool reset_disabled;
struct iommu_device iommu;
@@ -445,6 +448,83 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
return 0;
}
+static void rk_iommu_put_clocks(struct rk_iommu *iommu)
+{
+ int i;
+
+ for (i = 0; i < iommu->num_clocks; ++i) {
+ clk_unprepare(iommu->clocks[i]);
+ clk_put(iommu->clocks[i]);
+ }
+}
+
+static int rk_iommu_get_clocks(struct rk_iommu *iommu)
+{
+ struct device_node *np = iommu->dev->of_node;
+ int ret;
+ int i;
+
+ ret = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (ret == -ENOENT)
+ return 0;
+ else if (ret < 0)
+ return ret;
+
+ iommu->num_clocks = ret;
+ iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
+ sizeof(*iommu->clocks), GFP_KERNEL);
+ if (!iommu->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < iommu->num_clocks; ++i) {
+ iommu->clocks[i] = of_clk_get(np, i);
+ if (IS_ERR(iommu->clocks[i])) {
+ iommu->num_clocks = i;
+ goto err_clk_put;
+ }
+ ret = clk_prepare(iommu->clocks[i]);
+ if (ret) {
+ clk_put(iommu->clocks[i]);
+ iommu->num_clocks = i;
+ goto err_clk_put;
+ }
+ }
+
+ return 0;
+
+err_clk_put:
+ rk_iommu_put_clocks(iommu);
+
+ return ret;
+}
+
+static int rk_iommu_enable_clocks(struct rk_iommu *iommu)
+{
+ int i, ret;
+
+ for (i = 0; i < iommu->num_clocks; ++i) {
+ ret = clk_enable(iommu->clocks[i]);
+ if (ret)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (--i; i >= 0; --i)
+ clk_disable(iommu->clocks[i]);
+
+ return ret;
+}
+
+static void rk_iommu_disable_clocks(struct rk_iommu *iommu)
+{
+ int i;
+
+ for (i = 0; i < iommu->num_clocks; ++i)
+ clk_disable(iommu->clocks[i]);
+}
+
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
void __iomem *base = iommu->bases[index];
@@ -501,6 +581,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int i;
+ WARN_ON(rk_iommu_enable_clocks(iommu));
+
for (i = 0; i < iommu->num_mmu; i++) {
int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
if (int_status == 0)
@@ -547,6 +629,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
}
+ rk_iommu_disable_clocks(iommu);
+
return ret;
}
@@ -589,7 +673,9 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_enable_clocks(iommu);
rk_iommu_zap_lines(iommu, iova, size);
+ rk_iommu_disable_clocks(iommu);
}
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
@@ -818,10 +904,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (!iommu)
return 0;
- ret = rk_iommu_enable_stall(iommu);
+ ret = rk_iommu_enable_clocks(iommu);
if (ret)
return ret;
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ goto err_disable_clocks;
+
ret = rk_iommu_force_reset(iommu);
if (ret)
goto err_disable_stall;
@@ -846,11 +936,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
dev_dbg(dev, "Attached to iommu domain\n");
rk_iommu_disable_stall(iommu);
+ rk_iommu_disable_clocks(iommu);
return 0;
err_disable_stall:
rk_iommu_disable_stall(iommu);
+err_disable_clocks:
+ rk_iommu_disable_clocks(iommu);
return ret;
}
@@ -873,6 +966,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
/* Ignore error while disabling, just keep going */
+ WARN_ON(rk_iommu_enable_clocks(iommu));
rk_iommu_enable_stall(iommu);
rk_iommu_disable_paging(iommu);
for (i = 0; i < iommu->num_mmu; i++) {
@@ -880,6 +974,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
}
rk_iommu_disable_stall(iommu);
+ rk_iommu_disable_clocks(iommu);
iommu->domain = NULL;
@@ -1171,23 +1266,30 @@ static int rk_iommu_probe(struct platform_device *pdev)
return err;
}
+ err = rk_iommu_get_clocks(iommu);
+ if (err)
+ return err;
+
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
if (err)
- return err;
+ goto err_put_clocks;
iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
err = iommu_device_register(&iommu->iommu);
- if (err) {
- iommu_device_sysfs_remove(&iommu->iommu);
- return err;
- }
+ if (err)
+ goto err_remove_sysfs;
bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
return 0;
+err_remove_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
+err_put_clocks:
+ rk_iommu_put_clocks(iommu);
+ return err;
}
static int rk_iommu_remove(struct platform_device *pdev)
@@ -1196,6 +1298,7 @@ static int rk_iommu_remove(struct platform_device *pdev)
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
+ rk_iommu_put_clocks(iommu);
return 0;
}
--
2.11.0
IOMMU drivers are supposed to call iommu_group_get_for_dev() instead of
manually creating a group in their .add_device callback. This behavior is
not strictly required by the ARM DMA mapping implementation, but ARM64
already relies on it. This patch fixes the rockchip-iommu driver to comply
with this requirement.
Signed-off-by: Tomasz Figa <[email protected]>
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 120 +++++++++++++++++++++--------------------
1 file changed, 63 insertions(+), 57 deletions(-)
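For context, iommu_group_get_for_dev() in the core roughly does the following
(heavily simplified sketch of drivers/iommu/iommu.c, with a hypothetical
_sketch name); the .device_group callback added below is what it calls when
the device does not have a group yet:

static struct iommu_group *iommu_group_get_for_dev_sketch(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	/* rk_iommu_device_group() below ends up being called here. */
	group = ops->device_group(dev);
	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}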
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a7442d59ca43..ea4df162108b 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -888,6 +888,39 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
return rk_iommu;
}
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ unsigned long flags;
+ int i;
+
+ /* Allow 'virtual devices' (eg drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ /* Ignore error while disabling, just keep going */
+ WARN_ON(rk_iommu_enable_clocks(iommu));
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+ }
+ rk_iommu_disable_stall(iommu);
+ rk_iommu_disable_clocks(iommu);
+
+ iommu->domain = NULL;
+
+ dev_dbg(dev, "Detached from iommu domain\n");
+}
+
static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -904,6 +937,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (!iommu)
return 0;
+ if (iommu->domain)
+ rk_iommu_detach_device(iommu->domain, dev);
+
ret = rk_iommu_enable_clocks(iommu);
if (ret)
return ret;
@@ -948,39 +984,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return ret;
}
-static void rk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
- int i;
-
- /* Allow 'virtual devices' (eg drm) to detach from domain */
- iommu = rk_iommu_from_dev(dev);
- if (!iommu)
- return;
-
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
- list_del_init(&iommu->node);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
- /* Ignore error while disabling, just keep going */
- WARN_ON(rk_iommu_enable_clocks(iommu));
- rk_iommu_enable_stall(iommu);
- rk_iommu_disable_paging(iommu);
- for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
- }
- rk_iommu_disable_stall(iommu);
- rk_iommu_disable_clocks(iommu);
-
- iommu->domain = NULL;
-
- dev_dbg(dev, "Detached from iommu domain\n");
-}
-
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
@@ -1131,41 +1134,20 @@ static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
struct rk_iommu *iommu;
- int ret;
if (!rk_iommu_is_dev_iommu_master(dev))
return -ENODEV;
- group = iommu_group_get(dev);
- if (!group) {
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
- }
-
- ret = iommu_group_add_device(group, dev);
- if (ret)
- goto err_put_group;
-
- ret = rk_iommu_group_set_iommudata(group, dev);
- if (ret)
- goto err_remove_device;
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
iommu = rk_iommu_from_dev(dev);
if (iommu)
iommu_device_link(&iommu->iommu, dev);
iommu_group_put(group);
-
return 0;
-
-err_remove_device:
- iommu_group_remove_device(dev);
-err_put_group:
- iommu_group_put(group);
- return ret;
}
static void rk_iommu_remove_device(struct device *dev)
@@ -1182,6 +1164,29 @@ static void rk_iommu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static struct iommu_group *rk_iommu_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return group;
+ }
+
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_put_group;
+
+ return group;
+
+err_put_group:
+ iommu_group_put(group);
+ return ERR_PTR(ret);
+}
+
static const struct iommu_ops rk_iommu_ops = {
.domain_alloc = rk_iommu_domain_alloc,
.domain_free = rk_iommu_domain_free,
@@ -1193,6 +1198,7 @@ static const struct iommu_ops rk_iommu_ops = {
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
+ .device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
--
2.11.0
Use the first registered IOMMU device for dma mapping operations, and
drop the domain platform device.
This is similar to exynos iommu driver.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 91 +++++++++++-------------------------------
1 file changed, 24 insertions(+), 67 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ea4df162108b..51e4f982c4a6 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -78,7 +78,6 @@
struct rk_iommu_domain {
struct list_head iommus;
- struct platform_device *pdev;
u32 *dt; /* page directory table */
dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
@@ -100,12 +99,14 @@ struct rk_iommu {
struct iommu_domain *domain; /* domain to which iommu is attached */
};
+static struct device *dma_dev;
+
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
{
size_t size = count * sizeof(u32); /* count of u32 entry */
- dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -692,7 +693,6 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
dma_addr_t iova)
{
- struct device *dev = &rk_domain->pdev->dev;
u32 *page_table, *dte_addr;
u32 dte_index, dte;
phys_addr_t pt_phys;
@@ -710,9 +710,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, pt_dma)) {
- dev_err(dev, "DMA mapping error while allocating page table\n");
+ pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, pt_dma)) {
+ dev_err(dma_dev, "DMA mapping error while allocating page table\n");
free_page((unsigned long)page_table);
return ERR_PTR(-ENOMEM);
}
@@ -987,29 +987,20 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
- struct platform_device *pdev;
- struct device *iommu_dev;
if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
- /* Register a pdev per domain, so DMA API can base on this *dev
- * even some virtual master doesn't have an iommu slave
- */
- pdev = platform_device_register_simple("rk_iommu_domain",
- PLATFORM_DEVID_AUTO, NULL, 0);
- if (IS_ERR(pdev))
+ if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
- goto err_unreg_pdev;
-
- rk_domain->pdev = pdev;
+ return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- goto err_unreg_pdev;
+ return NULL;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -1020,11 +1011,10 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!rk_domain->dt)
goto err_put_cookie;
- iommu_dev = &pdev->dev;
- rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+ rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
- dev_err(iommu_dev, "DMA map error for DT\n");
+ if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
+ dev_err(dma_dev, "DMA map error for DT\n");
goto err_free_dt;
}
@@ -1045,8 +1035,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
-err_unreg_pdev:
- platform_device_unregister(pdev);
return NULL;
}
@@ -1063,20 +1051,18 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_dte_pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
- dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+ dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)page_table);
}
}
- dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+ dma_unmap_single(dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)rk_domain->dt);
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
-
- platform_device_unregister(rk_domain->pdev);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -1202,30 +1188,6 @@ static const struct iommu_ops rk_iommu_ops = {
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
-static int rk_iommu_domain_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
-
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
- arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
- return 0;
-}
-
-static struct platform_driver rk_iommu_domain_driver = {
- .probe = rk_iommu_domain_probe,
- .driver = {
- .name = "rk_iommu_domain",
- },
-};
-
static int rk_iommu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1288,6 +1250,14 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
goto err_remove_sysfs;
+ /*
+ * Use the first registered IOMMU device for domain to use with DMA
+ * API, since a domain might not physically correspond to a single
+ * IOMMU device..
+ */
+ if (!dma_dev)
+ dma_dev = &pdev->dev;
+
bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
return 0;
@@ -1327,24 +1297,11 @@ static struct platform_driver rk_iommu_driver = {
static int __init rk_iommu_init(void)
{
- int ret;
-
- ret = platform_driver_register(&rk_iommu_domain_driver);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&rk_iommu_driver);
- if (ret) {
- platform_driver_unregister(&rk_iommu_domain_driver);
- return ret;
- }
-
- return 0;
+ return platform_driver_register(&rk_iommu_driver);
}
static void __exit rk_iommu_exit(void)
{
platform_driver_unregister(&rk_iommu_driver);
- platform_driver_unregister(&rk_iommu_domain_driver);
}
subsys_initcall(rk_iommu_init);
--
2.11.0
Converts the rockchip-iommu driver to use the OF_IOMMU infrastructure,
which allows attaching master devices to their IOMMUs automatically
according to DT properties.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 116 +++++++++++------------------------------
1 file changed, 31 insertions(+), 85 deletions(-)
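For context, the OF_IOMMU path works roughly as sketched below (simplified,
hypothetical _sketch function; in reality of_iommu_configure() in
drivers/iommu/of_iommu.c looks the ops up through the fwnode registered with
iommu_device_set_fwnode(), which is why that call is added in this patch):

static int of_iommu_configure_sketch(struct device *dev,
				     struct device_node *master_np)
{
	struct of_phandle_args args;
	int idx = 0, ret;

	/* One "iommus" entry per IOMMU the master device is connected to. */
	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells", idx++, &args)) {
		/* Resolves to rk_iommu_of_xlate() for this driver. */
		ret = rk_iommu_ops.of_xlate(dev, &args);
		of_node_put(args.np);
		if (ret)
			return ret;
	}

	return 0;
}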
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 51e4f982c4a6..c2d3ac82184e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -874,18 +875,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- struct iommu_group *group;
- struct device *iommu_dev;
- struct rk_iommu *rk_iommu;
-
- group = iommu_group_get(dev);
- if (!group)
- return NULL;
- iommu_dev = iommu_group_get_iommudata(group);
- rk_iommu = dev_get_drvdata(iommu_dev);
- iommu_group_put(group);
-
- return rk_iommu;
+ return dev->archdata.iommu;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
@@ -1065,74 +1055,22 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
iommu_put_dma_cookie(&rk_domain->domain);
}
-static bool rk_iommu_is_dev_iommu_master(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- int ret;
-
- /*
- * An iommu master has an iommus property containing a list of phandles
- * to iommu nodes, each with an #iommu-cells property with value 0.
- */
- ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
- return (ret > 0);
-}
-
-static int rk_iommu_group_set_iommudata(struct iommu_group *group,
- struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct platform_device *pd;
- int ret;
- struct of_phandle_args args;
-
- /*
- * An iommu master has an iommus property containing a list of phandles
- * to iommu nodes, each with an #iommu-cells property with value 0.
- */
- ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
- &args);
- if (ret) {
- dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
- np, ret);
- return ret;
- }
- if (args.args_count != 0) {
- dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
- args.np, args.args_count);
- return -EINVAL;
- }
-
- pd = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (!pd) {
- dev_err(dev, "iommu %pOF not found\n", args.np);
- return -EPROBE_DEFER;
- }
-
- /* TODO(djkurtz): handle multiple slave iommus for a single master */
- iommu_group_set_iommudata(group, &pd->dev, NULL);
-
- return 0;
-}
-
static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
struct rk_iommu *iommu;
- if (!rk_iommu_is_dev_iommu_master(dev))
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
return -ENODEV;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
+ iommu_group_put(group);
- iommu = rk_iommu_from_dev(dev);
- if (iommu)
- iommu_device_link(&iommu->iommu, dev);
+ iommu_device_link(&iommu->iommu, dev);
- iommu_group_put(group);
return 0;
}
@@ -1140,37 +1078,40 @@ static void rk_iommu_remove_device(struct device *dev)
{
struct rk_iommu *iommu;
- if (!rk_iommu_is_dev_iommu_master(dev))
- return;
-
iommu = rk_iommu_from_dev(dev);
- if (iommu)
- iommu_device_unlink(&iommu->iommu, dev);
+ if (!iommu)
+ return;
+ iommu_device_unlink(&iommu->iommu, dev);
iommu_group_remove_device(dev);
}
static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
struct iommu_group *group;
- int ret;
group = iommu_group_get(dev);
- if (!group) {
+ if (!group)
group = iommu_group_alloc();
- if (IS_ERR(group))
- return group;
- }
-
- ret = rk_iommu_group_set_iommudata(group, dev);
- if (ret)
- goto err_put_group;
return group;
+}
-err_put_group:
- iommu_group_put(group);
- return ERR_PTR(ret);
+static int rk_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct platform_device *iommu_dev;
+
+ iommu_dev = of_find_device_by_node(args->np);
+ if (!iommu_dev) {
+ dev_err(dev, "iommu %pOF not found\n", args->np);
+ return -ENODEV;
+ }
+
+ dev->archdata.iommu = platform_get_drvdata(iommu_dev);
+ of_dev_put(iommu_dev);
+
+ return 0;
}
static const struct iommu_ops rk_iommu_ops = {
@@ -1186,6 +1127,7 @@ static const struct iommu_ops rk_iommu_ops = {
.iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+ .of_xlate = rk_iommu_of_xlate,
};
static int rk_iommu_probe(struct platform_device *pdev)
@@ -1246,6 +1188,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
goto err_put_clocks;
iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
+
err = iommu_device_register(&iommu->iommu);
if (err)
goto err_remove_sysfs;
@@ -1307,6 +1251,8 @@ static void __exit rk_iommu_exit(void)
subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);
+IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu", NULL);
+
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <[email protected]> and Daniel Kurtz <[email protected]>");
MODULE_ALIAS("platform:rockchip-iommu");
--
2.11.0
When the power domain is powered off, the IOMMU cannot be accessed and
register programming must be deferred until the power domain becomes
enabled.
Add runtime PM support, and use runtime PM device link from IOMMU to
master to startup and shutdown IOMMU.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 191 ++++++++++++++++++++++++++++++-----------
1 file changed, 143 insertions(+), 48 deletions(-)
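The key primitive below is pm_runtime_get_if_in_use(): it bumps the usage
counter and returns a positive value only if the device is runtime-active and
already in use, returns 0 otherwise, and returns -EINVAL if runtime PM is
disabled. Register access is then done only in the positive case, roughly
like this (illustrative sketch with a hypothetical function name):

static void rk_iommu_zap_iova_powered(struct rk_iommu *iommu,
				      dma_addr_t iova, size_t size)
{
	/* Only zap the TLBs of IOMMUs whose power domain is up. */
	if (pm_runtime_get_if_in_use(iommu->dev) <= 0)
		return;

	rk_iommu_enable_clocks(iommu);
	rk_iommu_zap_lines(iommu, iova, size);
	rk_iommu_disable_clocks(iommu);

	pm_runtime_put(iommu->dev);
}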
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c2d3ac82184e..c8de1456a016 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -18,11 +18,13 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -98,6 +100,12 @@ struct rk_iommu {
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
+ struct mutex pm_mutex; /* serializes power transitions */
+};
+
+struct rk_iommudata {
+ struct device_link *link; /* runtime PM link from IOMMU to master */
+ struct rk_iommu *iommu;
};
static struct device *dma_dev;
@@ -675,9 +683,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
iommu = list_entry(pos, struct rk_iommu, node);
- rk_iommu_enable_clocks(iommu);
- rk_iommu_zap_lines(iommu, iova, size);
- rk_iommu_disable_clocks(iommu);
+
+ /* Only zap TLBs of IOMMUs that are powered on. */
+ if (pm_runtime_get_if_in_use(iommu->dev) > 0) {
+ rk_iommu_enable_clocks(iommu);
+ rk_iommu_zap_lines(iommu, iova, size);
+ rk_iommu_disable_clocks(iommu);
+
+ pm_runtime_put(iommu->dev);
+ }
}
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
@@ -875,28 +889,19 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- return dev->archdata.iommu;
+ struct rk_iommudata *data = dev->archdata.iommu;
+
+ return data ? data->iommu : NULL;
}
-static void rk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static void rk_iommu_shutdown(struct rk_iommu *iommu)
{
- struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
int i;
- /* Allow 'virtual devices' (eg drm) to detach from domain */
- iommu = rk_iommu_from_dev(dev);
- if (!iommu)
- return;
-
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
- list_del_init(&iommu->node);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
/* Ignore error while disabling, just keep going */
WARN_ON(rk_iommu_enable_clocks(iommu));
+
+ mutex_lock(&iommu->pm_mutex);
rk_iommu_enable_stall(iommu);
rk_iommu_disable_paging(iommu);
for (i = 0; i < iommu->num_mmu; i++) {
@@ -904,46 +909,30 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
}
rk_iommu_disable_stall(iommu);
- rk_iommu_disable_clocks(iommu);
+ mutex_unlock(&iommu->pm_mutex);
- iommu->domain = NULL;
-
- dev_dbg(dev, "Detached from iommu domain\n");
+ rk_iommu_disable_clocks(iommu);
}
-static int rk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+static int rk_iommu_startup(struct rk_iommu *iommu)
{
- struct rk_iommu *iommu;
+ struct iommu_domain *domain = iommu->domain;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
int ret, i;
- /*
- * Allow 'virtual devices' (e.g., drm) to attach to domain.
- * Such a device does not belong to an iommu group.
- */
- iommu = rk_iommu_from_dev(dev);
- if (!iommu)
- return 0;
-
- if (iommu->domain)
- rk_iommu_detach_device(iommu->domain, dev);
-
ret = rk_iommu_enable_clocks(iommu);
if (ret)
return ret;
+ mutex_lock(&iommu->pm_mutex);
ret = rk_iommu_enable_stall(iommu);
if (ret)
- goto err_disable_clocks;
+ goto err_unlock_mutex;
ret = rk_iommu_force_reset(iommu);
if (ret)
goto err_disable_stall;
- iommu->domain = domain;
-
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
rk_domain->dt_dma);
@@ -955,21 +944,90 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (ret)
goto err_disable_stall;
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
- list_add_tail(&iommu->node, &rk_domain->iommus);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
- dev_dbg(dev, "Attached to iommu domain\n");
-
rk_iommu_disable_stall(iommu);
+ mutex_unlock(&iommu->pm_mutex);
+
rk_iommu_disable_clocks(iommu);
return 0;
err_disable_stall:
rk_iommu_disable_stall(iommu);
-err_disable_clocks:
+err_unlock_mutex:
+ mutex_unlock(&iommu->pm_mutex);
rk_iommu_disable_clocks(iommu);
+ return ret;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ unsigned long flags;
+
+ /* Allow 'virtual devices' (eg drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ dev_dbg(dev, "Detaching from iommu domain\n");
+
+ /* iommu already detached */
+ if (iommu->domain != domain)
+ return;
+
+ iommu->domain = NULL;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ if (pm_runtime_get_if_in_use(iommu->dev) > 0) {
+ rk_iommu_shutdown(iommu);
+ pm_runtime_put(iommu->dev);
+ }
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ dev_dbg(dev, "Attaching to iommu domain\n");
+
+ /* iommu already attached */
+ if (iommu->domain == domain)
+ return 0;
+
+ if (iommu->domain)
+ rk_iommu_detach_device(iommu->domain, dev);
+
+ iommu->domain = domain;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ if (pm_runtime_get_if_in_use(iommu->dev) <= 0)
+ return 0;
+
+ ret = rk_iommu_startup(iommu);
+ if (ret)
+ rk_iommu_detach_device(iommu->domain, dev);
+
+ pm_runtime_put(iommu->dev);
return ret;
}
@@ -1059,6 +1117,7 @@ static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
struct rk_iommu *iommu;
+ struct rk_iommudata *data = dev->archdata.iommu;
iommu = rk_iommu_from_dev(dev);
if (!iommu)
@@ -1070,6 +1129,7 @@ static int rk_iommu_add_device(struct device *dev)
iommu_group_put(group);
iommu_device_link(&iommu->iommu, dev);
+ data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
return 0;
}
@@ -1077,11 +1137,13 @@ static int rk_iommu_add_device(struct device *dev)
static void rk_iommu_remove_device(struct device *dev)
{
struct rk_iommu *iommu;
+ struct rk_iommudata *data = dev->archdata.iommu;
iommu = rk_iommu_from_dev(dev);
if (!iommu)
return;
+ device_link_del(data->link);
iommu_device_unlink(&iommu->iommu, dev);
iommu_group_remove_device(dev);
}
@@ -1101,6 +1163,11 @@ static int rk_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct platform_device *iommu_dev;
+ struct rk_iommudata *data;
+
+ data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
iommu_dev = of_find_device_by_node(args->np);
if (!iommu_dev) {
@@ -1108,7 +1175,9 @@ static int rk_iommu_of_xlate(struct device *dev,
return -ENODEV;
}
- dev->archdata.iommu = platform_get_drvdata(iommu_dev);
+ data->iommu = platform_get_drvdata(iommu_dev);
+ dev->archdata.iommu = data;
+
of_dev_put(iommu_dev);
return 0;
@@ -1204,6 +1273,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ pm_runtime_enable(dev);
+
return 0;
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
@@ -1216,6 +1287,8 @@ static int rk_iommu_remove(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
rk_iommu_put_clocks(iommu);
@@ -1223,6 +1296,27 @@ static int rk_iommu_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused rk_iommu_suspend(struct device *dev)
+{
+ struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+ rk_iommu_shutdown(iommu);
+ return 0;
+}
+
+static int __maybe_unused rk_iommu_resume(struct device *dev)
+{
+ struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+ return rk_iommu_startup(iommu);
+}
+
+static const struct dev_pm_ops rk_iommu_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static const struct of_device_id rk_iommu_dt_ids[] = {
{ .compatible = "rockchip,iommu" },
{ /* sentinel */ }
@@ -1235,6 +1329,7 @@ static struct platform_driver rk_iommu_driver = {
.driver = {
.name = "rk_iommu",
.of_match_table = rk_iommu_dt_ids,
+ .pm = &rk_iommu_pm_ops,
.suppress_bind_attrs = true,
},
};
--
2.11.0
There may be multiple masters sharing the same IOMMU device. Put them in
the same iommu group, so that they also share the same iommu domain.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 39 +++++++++++++++++++++++++++++++--------
1 file changed, 31 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c8de1456a016..6c316dd0dd2d 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -100,11 +100,13 @@ struct rk_iommu {
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
+ struct iommu_group *group;
struct mutex pm_mutex; /* serializes power transitions */
};
struct rk_iommudata {
struct device_link *link; /* runtime PM link from IOMMU to master */
+ struct iommu_domain *domain; /* domain to which device is attached */
struct rk_iommu *iommu;
};
@@ -964,6 +966,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ struct rk_iommudata *data = dev->archdata.iommu;
unsigned long flags;
/* Allow 'virtual devices' (eg drm) to detach from domain */
@@ -971,6 +974,12 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
if (!iommu)
return;
+ /* device already detached */
+ if (data->domain != domain)
+ return;
+
+ data->domain = NULL;
+
dev_dbg(dev, "Detaching from iommu domain\n");
/* iommu already detached */
@@ -994,6 +1003,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ struct rk_iommudata *data = dev->archdata.iommu;
unsigned long flags;
int ret;
@@ -1005,15 +1015,21 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (!iommu)
return 0;
+ /* device already attached */
+ if (data->domain == domain)
+ return 0;
+
+ if (data->domain)
+ rk_iommu_detach_device(data->domain, dev);
+
+ data->domain = domain;
+
dev_dbg(dev, "Attaching to iommu domain\n");
/* iommu already attached */
if (iommu->domain == domain)
return 0;
- if (iommu->domain)
- rk_iommu_detach_device(iommu->domain, dev);
-
iommu->domain = domain;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
@@ -1150,13 +1166,11 @@ static void rk_iommu_remove_device(struct device *dev)
static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
- struct iommu_group *group;
+ struct rk_iommu *iommu;
- group = iommu_group_get(dev);
- if (!group)
- group = iommu_group_alloc();
+ iommu = rk_iommu_from_dev(dev);
- return group;
+ return iommu ? iommu->group : NULL;
}
static int rk_iommu_of_xlate(struct device *dev,
@@ -1263,6 +1277,12 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
goto err_remove_sysfs;
+ iommu->group = iommu_group_alloc();
+ if (IS_ERR(iommu->group)) {
+ err = PTR_ERR(iommu->group);
+ goto err_unreg_iommu;
+ }
+
/*
* Use the first registered IOMMU device for domain to use with DMA
* API, since a domain might not physically correspond to a single
@@ -1276,6 +1296,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+err_unreg_iommu:
+ iommu_device_unregister(&iommu->iommu);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
err_put_clocks:
@@ -1289,6 +1311,7 @@ static int rk_iommu_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ iommu_group_put(iommu->group);
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
rk_iommu_put_clocks(iommu);
--
2.11.0
It's hard to undo bus_set_iommu() in the error path, so move it to the
end of rk_iommu_probe().
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2:
Move bus_set_iommu() to rk_iommu_probe().
drivers/iommu/rockchip-iommu.c | 20 +++++++-------------
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index bd8b32dc0db6..d2a0b0daf40d 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1187,6 +1187,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
return err;
}
+ bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+
return 0;
}
@@ -1218,27 +1220,19 @@ static struct platform_driver rk_iommu_driver = {
static int __init rk_iommu_init(void)
{
- struct device_node *np;
int ret;
- np = of_find_matching_node(NULL, rk_iommu_dt_ids);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
- if (ret)
- return ret;
-
ret = platform_driver_register(&rk_iommu_domain_driver);
if (ret)
return ret;
ret = platform_driver_register(&rk_iommu_driver);
- if (ret)
+ if (ret) {
platform_driver_unregister(&rk_iommu_domain_driver);
- return ret;
+ return ret;
+ }
+
+ return 0;
}
static void __exit rk_iommu_exit(void)
{
--
2.11.0
Add missing iommu_device_sysfs_remove in error path.
Also adjust sequence of deinit functions in the remove.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4ffb3a65c9d2..bd8b32dc0db6 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1182,18 +1182,20 @@ static int rk_iommu_probe(struct platform_device *pdev)
iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
err = iommu_device_register(&iommu->iommu);
+ if (err) {
+ iommu_device_sysfs_remove(&iommu->iommu);
+ return err;
+ }
- return err;
+ return 0;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
- if (iommu) {
- iommu_device_sysfs_remove(&iommu->iommu);
- iommu_device_unregister(&iommu->iommu);
- }
+ iommu_device_unregister(&iommu->iommu);
+ iommu_device_sysfs_remove(&iommu->iommu);
return 0;
}
--
2.11.0
Suggested-by: Robin Murphy <[email protected]>
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 38 +++++++++++---------------------------
1 file changed, 11 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9d991c2d8767..4a12d4746095 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -91,7 +92,6 @@ struct rk_iommu {
void __iomem **bases;
int num_mmu;
int *irq;
- int num_irq;
bool reset_disabled;
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
@@ -830,13 +830,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
iommu->domain = domain;
- for (i = 0; i < iommu->num_irq; i++) {
- ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
- IRQF_SHARED, dev_name(dev), iommu);
- if (ret)
- return ret;
- }
-
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
rk_domain->dt_dma);
@@ -885,9 +878,6 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
}
rk_iommu_disable_stall(iommu);
- for (i = 0; i < iommu->num_irq; i++)
- devm_free_irq(iommu->dev, iommu->irq[i], iommu);
-
iommu->domain = NULL;
dev_dbg(dev, "Detached from iommu domain\n");
@@ -1138,7 +1128,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int err, i;
+ int err, i, irq, num_irq;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1165,23 +1155,17 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
- iommu->num_irq = platform_irq_count(pdev);
- if (iommu->num_irq < 0)
- return iommu->num_irq;
- if (iommu->num_irq == 0)
- return -ENXIO;
-
- iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
- GFP_KERNEL);
- if (!iommu->irq)
- return -ENOMEM;
-
- for (i = 0; i < iommu->num_irq; i++) {
- iommu->irq[i] = platform_get_irq(pdev, i);
- if (iommu->irq[i] < 0) {
- dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
+ num_irq = of_irq_count(dev->of_node);
+ for (i = 0; i < num_irq; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(dev, "Failed to get IRQ, %d\n", irq);
return -ENXIO;
}
+ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (err)
+ return err;
}
iommu->reset_disabled = device_property_read_bool(dev,
--
2.11.0
It's not safe to unbind rockchip IOMMU driver.
Signed-off-by: Jeffy Chen <[email protected]>
---
Changes in v2: None
drivers/iommu/rockchip-iommu.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4a12d4746095..da4afe016a4e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1205,6 +1205,7 @@ static struct platform_driver rk_iommu_driver = {
.driver = {
.name = "rk_iommu",
.of_match_table = rk_iommu_dt_ids,
+ .suppress_bind_attrs = true,
},
};
--
2.11.0
Hi Jeffy,
Thanks for the patch. Please see my comments inline.
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
Please add patch description.
> Suggested-by: Robin Murphy <[email protected]>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
[snip]
> - for (i = 0; i < iommu->num_irq; i++) {
> - iommu->irq[i] = platform_get_irq(pdev, i);
> - if (iommu->irq[i] < 0) {
> - dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
> + num_irq = of_irq_count(dev->of_node);
> + for (i = 0; i < num_irq; i++) {
> + irq = platform_get_irq(pdev, i);
This lacks consistency: of_irq_count() is used for counting, but
platform_get_irq() is used for getting. Either the platform_ or the of_ API
should be used for both, and I'd lean towards platform_, since it's more
general.
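Something along these lines (untested, just to illustrate the idea) would
keep everything within the platform_ API:

	num_irq = platform_irq_count(pdev);
	if (num_irq < 0)
		return num_irq;

	for (i = 0; i < num_irq; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return irq;

		err = devm_request_irq(dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}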
> + if (irq < 0) {
> + dev_err(dev, "Failed to get IRQ, %d\n", irq);
> return -ENXIO;
> }
> + err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
> + IRQF_SHARED, dev_name(dev), iommu);
> + if (err)
> + return err;
> }
Looks like there is some more initialization below. Is the driver okay
with the IRQ being potentially fired right here? (Shared IRQ handlers
might be run at request_irq() time for testing.)
>
> iommu->reset_disabled = device_property_read_bool(dev,
^^
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> It's not safe to unbind rockchip IOMMU driver.
Might be good to explain why it is not safe and actually add that it
does not make any sense for such low level devices.
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> Add missing iommu_device_sysfs_remove in error path.
>
> Also adjust sequence of deinit functions in the remove.
>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
>
> Changes in v2: None
>
> drivers/iommu/rockchip-iommu.c | 12 +++++++-----
> 1 file changed, 7 insertions(+), 5 deletions(-)
>
Reviewed-by: Tomasz Figa <[email protected]>
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> It's hard to undo bus_set_iommu() in the error path, so move it to the
> end of rk_iommu_probe().
Does this work fine now? I remember we used to need this called in an
early initcall for all the ARM/ARM64 DMA stuff to work.
Best regards,
Tomasz
On Wed, Jan 17, 2018 at 1:23 PM, Tomasz Figa <[email protected]> wrote:
> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
>> It's not safe to unbind rockchip IOMMU driver.
>
> Might be good to explain why it is not safe and actually add that it
> does not make any sense for such low level devices.
Actually, shouldn't we also remove support for .remove() and module exit?
I don't think it's feasible to unload this driver. We even have the
ROCKCHIP_IOMMU Kconfig entry defined as bool, so the driver can only be
built-in.
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> Use the first registered IOMMU device for dma mapping operations, and
> drop the domain platform device.
>
> This is similar to exynos iommu driver.
>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
>
> Changes in v2: None
>
> drivers/iommu/rockchip-iommu.c | 91 +++++++++++-------------------------------
> 1 file changed, 24 insertions(+), 67 deletions(-)
Looks good to me, but we need to remove the platform driver .remove(), as
was done for the Exynos IOMMU as well. With that fixed (probably squashed
into the patch that prohibits unbind):
Reviewed-by: Tomasz Figa <[email protected]>
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> Converts the rockchip-iommu driver to use the OF_IOMMU infrastructure,
> which allows attaching master devices to their IOMMUs automatically
> according to DT properties.
>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
>
> Changes in v2: None
>
> drivers/iommu/rockchip-iommu.c | 116 +++++++++++------------------------------
> 1 file changed, 31 insertions(+), 85 deletions(-)
>
> diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
> index 51e4f982c4a6..c2d3ac82184e 100644
> --- a/drivers/iommu/rockchip-iommu.c
> +++ b/drivers/iommu/rockchip-iommu.c
[snip]
> +static int rk_iommu_of_xlate(struct device *dev,
> + struct of_phandle_args *args)
> +{
> + struct platform_device *iommu_dev;
> +
> + iommu_dev = of_find_device_by_node(args->np);
> + if (!iommu_dev) {
> + dev_err(dev, "iommu %pOF not found\n", args->np);
> + return -ENODEV;
> + }
> +
> + dev->archdata.iommu = platform_get_drvdata(iommu_dev);
This will work only if that iommu was already probed. Do we have any
guarantees that this callback is not called earlier?
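One possible guard, purely illustrative (the -EPROBE_DEFER return below is
my assumption, not something from the posted patch):

static int rk_iommu_of_xlate(struct device *dev,
                             struct of_phandle_args *args)
{
        struct platform_device *iommu_dev;

        iommu_dev = of_find_device_by_node(args->np);
        if (!iommu_dev)
                return -ENODEV;

        /* hypothetical: drvdata is only set once rk_iommu_probe() succeeded */
        if (!platform_get_drvdata(iommu_dev))
                return -EPROBE_DEFER;

        dev->archdata.iommu = platform_get_drvdata(iommu_dev);

        return 0;
}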
Best regards,
Tomasz
On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
> When the power domain is powered off, the IOMMU cannot be accessed and
> register programming must be deferred until the power domain becomes
> enabled.
>
> Add runtime PM support, and use runtime PM device link from IOMMU to
> master to startup and shutdown IOMMU.
[snip]
> @@ -875,28 +889,19 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
>
> static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
> {
> - return dev->archdata.iommu;
> + struct rk_iommudata *data = dev->archdata.iommu;
> +
> + return data ? data->iommu : NULL;
> }
Is this change intentionally part of this patch? It seems more
relevant to the previous patch in this series.
[snip]
> +static int rk_iommu_startup(struct rk_iommu *iommu)
> {
> - struct rk_iommu *iommu;
> + struct iommu_domain *domain = iommu->domain;
> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
> - unsigned long flags;
> int ret, i;
>
> - /*
> - * Allow 'virtual devices' (e.g., drm) to attach to domain.
> - * Such a device does not belong to an iommu group.
> - */
> - iommu = rk_iommu_from_dev(dev);
> - if (!iommu)
> - return 0;
> -
> - if (iommu->domain)
> - rk_iommu_detach_device(iommu->domain, dev);
> -
> ret = rk_iommu_enable_clocks(iommu);
> if (ret)
> return ret;
>
Don't we need to check here (and in _shutdown() too) if we have a
domain attached?
> + mutex_lock(&iommu->pm_mutex);
> ret = rk_iommu_enable_stall(iommu);
> if (ret)
> - goto err_disable_clocks;
> + goto err_unlock_mutex;
[snip]
> + iommu->domain = NULL;
> +
> + spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> + list_del_init(&iommu->node);
> + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
> +
> + if (pm_runtime_get_if_in_use(iommu->dev) > 0) {
Actually, if the above call returns -EINVAL, don't we still need to
call rk_iommu_shutdown(), because it just means runtime PM is disabled
and the IOMMU is always powered on?
> + rk_iommu_shutdown(iommu);
> + pm_runtime_put(iommu->dev);
> + }
> +}
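For the pm_runtime_get_if_in_use() question above, one way to also cover
the runtime-PM-disabled case could be (rough sketch, untested):

        ret = pm_runtime_get_if_in_use(iommu->dev);
        /* > 0: powered and reference taken; -EINVAL: runtime PM disabled, so always on */
        if (ret > 0 || ret == -EINVAL)
                rk_iommu_shutdown(iommu);
        if (ret > 0)
                pm_runtime_put(iommu->dev);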
[snip]
> + spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> + list_add_tail(&iommu->node, &rk_domain->iommus);
> + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
> +
> + if (pm_runtime_get_if_in_use(iommu->dev) <= 0)
Ditto.
> + return 0;
> +
> + ret = rk_iommu_startup(iommu);
> + if (ret)
> + rk_iommu_detach_device(data->domain, dev);
[snip]
> @@ -1108,7 +1175,9 @@ static int rk_iommu_of_xlate(struct device *dev,
> return -ENODEV;
> }
>
> - dev->archdata.iommu = platform_get_drvdata(iommu_dev);
> + data->iommu = platform_get_drvdata(iommu_dev);
> + dev->archdata.iommu = data;
> +
I think this change might have been mistakenly squashed into this patch
instead of the previous one.
Best regards,
Tomasz
On Wed, Jan 17, 2018 at 4:08 PM, JeffyChen <[email protected]> wrote:
> Hi Tomasz,
>
> Thanks for your reply.
>
> On 01/17/2018 12:21 PM, Tomasz Figa wrote:
>>
>> Hi Jeffy,
>>
>> Thanks for the patch. Please see my comments inline.
>>
>> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]>
>> wrote:
>>
>> Please add patch description.
>
>
> ok, will do.
>>
>>
>>> Suggested-by: Robin Murphy <[email protected]>
>>> Signed-off-by: Jeffy Chen <[email protected]>
>>> ---
>>
>> [snip]
>>>
>>> - for (i = 0; i < iommu->num_irq; i++) {
>>> - iommu->irq[i] = platform_get_irq(pdev, i);
>>> - if (iommu->irq[i] < 0) {
>>> - dev_err(dev, "Failed to get IRQ, %d\n",
>>> iommu->irq[i]);
>>> + num_irq = of_irq_count(dev->of_node);
>>> + for (i = 0; i < num_irq; i++) {
>>> + irq = platform_get_irq(pdev, i);
>>
>>
>> This lacks consistency. of_irq_count() is used for counting, but
>> platform_get_irq() is used for getting. Either platform_ or of_ API
>> should be used for both and I'd lean for platform_, since it's more
>> general.
>
> hmmm, right, i was thinking of removing num_irq, and do something like:
> while (nr++) {
>         err = platform_get_irq(dev, nr);
>         if (err == -EPROBE_DEFER)
>                 break;
>         if (err < 0)
>                 return err;
>         ....
> }
>
> but forgot to do that..
Was there anything wrong with platform_irq_count() used by existing code?
>
>>
>>> + if (irq < 0) {
>>> + dev_err(dev, "Failed to get IRQ, %d\n", irq);
>>> return -ENXIO;
>>> }
>>> + err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
>>> + IRQF_SHARED, dev_name(dev),
>>> iommu);
>>> + if (err)
>>> + return err;
>>> }
>>
>>
>> Looks like there is some more initialization below. Is the driver okay
>> with the IRQ being potentially fired right here? (Shared IRQ handlers
>> might be run at request_irq() time for testing.)
>>
> right, forget about that. maybe we can check iommu->domain not NULL in
> rk_iommu_irq()
Maybe we could just move IRQ requesting to the end of probe?
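Roughly, i.e. make requesting the IRQs the very last step (untested sketch,
keeping the of_irq_count() form from the patch):

        /* ...all other probe setup, including iommu_device_register()... */

        num_irq = of_irq_count(dev->of_node);
        for (i = 0; i < num_irq; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq < 0)
                        return irq;

                err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), iommu);
                if (err)
                        return err;
        }

        return 0;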
Best regards,
Tomasz
On Wed, Jan 17, 2018 at 4:14 PM, JeffyChen <[email protected]> wrote:
> Hi Tomasz,
>
> On 01/17/2018 01:26 PM, Tomasz Figa wrote:
>>
>> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]>
>> wrote:
>>>
>>> It's hard to undo bus_set_iommu() in the error path, so move it to the
>>> end of rk_iommu_probe().
>>
>>
>> Does this work fine now? I remember we used to need this called in an
>> early initcall for all the ARM/ARM64 DMA stuff to work.
>>
> yes, i think it works now, i saw some other iommu drivers also do
> this (arm-smmu-v3, mtk_iommu) :)
Okay, if so:
Reviewed-by: Tomasz Figa <[email protected]>
Best regards,
Tomasz
P.S. Looks like your email client is set to HTML messages. Your
messages might end up dropped from the mailing list.
On Wed, Jan 17, 2018 at 4:20 PM, JeffyChen <[email protected]> wrote:
> Hi Tomasz,
>
> Thanks for your reply.
>
>
> On 01/17/2018 01:44 PM, Tomasz Figa wrote:
>>
>> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]>
>> wrote:
>>>
>>> Converts the rockchip-iommu driver to use the OF_IOMMU infrastructure,
>>> which allows attaching master devices to their IOMMUs automatically
>>> according to DT properties.
>>>
>>> Signed-off-by: Jeffy Chen <[email protected]>
>>> ---
>>>
>>> Changes in v2: None
>>>
>>> drivers/iommu/rockchip-iommu.c | 116
>>> +++++++++++------------------------------
>>> 1 file changed, 31 insertions(+), 85 deletions(-)
>>>
>>> diff --git a/drivers/iommu/rockchip-iommu.c
>>> b/drivers/iommu/rockchip-iommu.c
>>> index 51e4f982c4a6..c2d3ac82184e 100644
>>> --- a/drivers/iommu/rockchip-iommu.c
>>> +++ b/drivers/iommu/rockchip-iommu.c
>>
>> [snip]
>>>
>>> +static int rk_iommu_of_xlate(struct device *dev,
>>> + struct of_phandle_args *args)
>>> +{
>>> + struct platform_device *iommu_dev;
>>> +
>>> + iommu_dev = of_find_device_by_node(args->np);
>>> + if (!iommu_dev) {
>>> + dev_err(dev, "iommu %pOF not found\n", args->np);
>>> + return -ENODEV;
>>> + }
>>> +
>>> + dev->archdata.iommu = platform_get_drvdata(iommu_dev);
>>
>>
>> This will work only if that iommu was already probed. Do we have any
>> guarantees that this callback is not called earlier?
>
> in of_iommu.c, the of_iommu_xlate would check for fwnode before calling
> this.
Right, looks like deferred probe is handled there.
>
> but it's possible the probe failed after calling iommu_device_set_fwnode, so
> i'll try to add some checks here, and maybe adjust probe() to prevent it
> too.
I think iommu_device_set_fwnode() is not enough for of_iommu_xlate()
to find the IOMMU. The IOMMU is actually added to the IOMMU list by
iommu_device_register(), which is last in the sequence, so I guess we
should be fine.
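For reference, the ordering being discussed looks roughly like this at the
end of probe (illustration only; the exact calls in the posted patch may
differ):

        err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return err;

        iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

        /* only now does of_iommu_xlate() start finding this IOMMU */
        err = iommu_device_register(&iommu->iommu);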
Best regards,
Tomasz
On Wed, Jan 17, 2018 at 4:26 PM, JeffyChen <[email protected]> wrote:
> Hi Tomasz,
>
> Thanks for your reply.
>
> On 01/17/2018 02:20 PM, Tomasz Figa wrote:
>>
>> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]>
>> [snip]
>>>
>>> +static int rk_iommu_startup(struct rk_iommu *iommu)
>>> {
>>> - struct rk_iommu *iommu;
>>> + struct iommu_domain *domain = iommu->domain;
>>> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
>>> - unsigned long flags;
>>> int ret, i;
>>>
>>> - /*
>>> - * Allow 'virtual devices' (e.g., drm) to attach to domain.
>>> - * Such a device does not belong to an iommu group.
>>> - */
>>> - iommu = rk_iommu_from_dev(dev);
>>> - if (!iommu)
>>> - return 0;
>>> -
>>> - if (iommu->domain)
>>> - rk_iommu_detach_device(iommu->domain, dev);
>>> -
>>> ret = rk_iommu_enable_clocks(iommu);
>>> if (ret)
>>> return ret;
>>>
>>
>> Don't we need to check here (and in _shutdown() too) if we have a
>> domain attached?
>
> hmmm, right, the startup might been called by resume, so should check
> iommu->domain here.
>
> but the shutdown would be called at the end of detach or suspend, it could
> be not attached or attached.
If startup might be called by resume, without domain attached, what
prevents shutdown from being called by suspend after that resume,
still without domain attached? Is it guaranteed that if resume is
called, someone will attach a domain before suspend is called?
Best regards,
Tomasz
Hi Tomasz,
On 01/17/2018 03:16 PM, Tomasz Figa wrote:
>>> >>
>>> >>This lacks consistency. of_irq_count() is used for counting, but
>>> >>platform_get_irq() is used for getting. Either platform_ or of_ API
>>> >>should be used for both and I'd lean for platform_, since it's more
>>> >>general.
>> >
>> >hmmm, right, i was thinking of removing num_irq, and do something like:
>> >while (nr++) {
>> >        err = platform_get_irq(dev, nr);
>> >        if (err == -EPROBE_DEFER)
>> >                break;
>> >        if (err < 0)
>> >                return err;
>> >        ....
>> >}
>> >
>> >but forgot to do that..
> Was there anything wrong with platform_irq_count() used by existing code?
just thought platform_irq_count() might ignore some errors (except for
-EPROBE_DEFER):
int platform_irq_count(struct platform_device *dev)
{
        int ret, nr = 0;

        while ((ret = platform_get_irq(dev, nr)) >= 0)
                nr++;

        if (ret == -EPROBE_DEFER)
                return ret;

        return nr;
}
but i guess that would not matter..
>
>> >
>>> >>
>>>> >>>+ if (irq < 0) {
>>>> >>>+ dev_err(dev, "Failed to get IRQ, %d\n", irq);
>>>> >>> return -ENXIO;
>>>> >>> }
>>>> >>>+ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
>>>> >>>+ IRQF_SHARED, dev_name(dev),
>>>> >>>iommu);
>>>> >>>+ if (err)
>>>> >>>+ return err;
>>>> >>> }
>>> >>
>>> >>
>>> >>Looks like there is some more initialization below. Is the driver okay
>>> >>with the IRQ being potentially fired right here? (Shared IRQ handlers
>>> >>might be run at request_irq() time for testing.)
>>> >>
>> >right, forget about that. maybe we can check iommu->domain not NULL in
>> >rk_iommu_irq()
> Maybe we could just move IRQ requesting to the end of probe?
>
ok, that should work too.
and maybe we should also check the power status in the irq handler, in
case it gets fired after the IOMMU is powered off...
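something like this at the top of the handler maybe (rough sketch, untested):

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        int ret;

        ret = pm_runtime_get_if_in_use(iommu->dev);
        if (ret == 0)
                return IRQ_NONE;        /* powered off, cannot be our interrupt */

        /* ... existing fault decoding and handling ... */

        if (ret > 0)
                pm_runtime_put(iommu->dev);

        return IRQ_HANDLED;
}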
> Best regards,
> Tomasz
>
>
>
On 01/17/2018 03:30 PM, Tomasz Figa wrote:
>> >but it's possible the probe failed after calling iommu_device_set_fwnode, so
>> >i'll try to add some checks here, and maybe adjust probe() to prevent it
>> >too.
> I think iommu_device_set_fwnode() is not enough for of_iommu_xlate()
> to find the IOMMU. The IOMMU is actually added to the IOMMU list by
> iommu_device_register(), which is last in the sequence, so I guess we
> should be fine.
>
hmmm, the later patch would change that ;) i'll fix it in the next version.
> Best regards,
> Tomasz
>
>
>
Hi Tomasz,
On 01/17/2018 03:38 PM, Tomasz Figa wrote:
>>> >>Don't we need to check here (and in _shutdown() too) if we have a
>>> >>domain attached?
>> >
>> >hmmm, right, the startup might been called by resume, so should check
>> >iommu->domain here.
>> >
>> >but the shutdown would be called at the end of detach or suspend, it could
>> >be not attached or attached.
> If startup might be called by resume, without domain attached, what
> prevents shutdown from being called by suspend after that resume,
> still without domain attached? Is it guaranteed that if resume is
> called, someone will attach a domain before suspend is called?
>
no, the shutdown would be called by:
1/ end of detach_dev
so it would be not attached at that time
2/ suspend
so it could be attached, and also could be not attached
anyway, i think the shutdown would work without domain attached(just
disable paging and clear the iommu bases) ;)
> Best regards,
> Tomasz
>
>
>
Hi Tomasz,
On 01/17/2018 03:52 PM, JeffyChen wrote:
> Hi Tomasz,
>
> On 01/17/2018 03:38 PM, Tomasz Figa wrote:
>>>> >>Don't we need to check here (and in _shutdown() too) if we have a
>>>> >>domain attached?
>>> >
>>> >hmmm, right, the startup might been called by resume, so should check
>>> >iommu->domain here.
>>> >
>>> >but the shutdown would be called at the end of detach or suspend, it
>>> could
>>> >be not attached or attached.
>> If startup might be called by resume, without domain attached, what
>> prevents shutdown from being called by suspend after that resume,
>> still without domain attached? Is it guaranteed that if resume is
>> called, someone will attach a domain before suspend is called?
>>
> no, the shutdown would be called by:
> 1/ end of detach_dev
> so it would be not attached at that time
>
> 2/ suspend
> so it could be attached, and also could be not attached
>
>
> anyway, i think the shutdown would work without domain attached(just
> disable paging and clear the iommu bases) ;)
>
hmmm, i see the problem.
so we need to:
1/ move shutdown a little earlier in detach_dev, so it could still see
the iommu->domain
2/ check iommu->domain in shutdown, to prevent unnecessary shutdown
or maybe just add iommu->domain check in suspend and resume.
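for the last option, roughly (untested sketch; callback names are just
placeholders, and it assumes probe sets the rk_iommu as drvdata):

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        if (!iommu->domain)
                return 0;

        rk_iommu_shutdown(iommu);

        return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        if (!iommu->domain)
                return 0;

        return rk_iommu_startup(iommu);
}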
>> Best regards,
>> Tomasz
>>
>>
>>
>
On 17/01/18 05:26, Tomasz Figa wrote:
> On Tue, Jan 16, 2018 at 10:25 PM, Jeffy Chen <[email protected]> wrote:
>> It's hard to undo bus_set_iommu() in the error path, so move it to the
>> end of rk_iommu_probe().
>
> Does this work fine now? I remember we used to need this called in an
> early initcall for all the ARM/ARM64 DMA stuff to work.
It will do once we get to patch #11 (where the IOMMU_OF_DECLARE ensures
that masters defer until iommu_register() has set ops with a non-NULL
.of_xlate callback); in the meantime you might end up depending on DT
probe order as to whether the master uses the IOMMU or not. I'd say it's
up to you guys whether you consider that a bisection-breaker or not.
Robin.
On 16/01/18 13:25, Jeffy Chen wrote:
> Suggested-by: Robin Murphy <[email protected]>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
>
> Changes in v2: None
>
> drivers/iommu/rockchip-iommu.c | 38 +++++++++++---------------------------
> 1 file changed, 11 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
> index 9d991c2d8767..4a12d4746095 100644
> --- a/drivers/iommu/rockchip-iommu.c
> +++ b/drivers/iommu/rockchip-iommu.c
> @@ -18,6 +18,7 @@
> #include <linux/mm.h>
> #include <linux/module.h>
> #include <linux/of.h>
> +#include <linux/of_irq.h>
> #include <linux/of_platform.h>
> #include <linux/platform_device.h>
> #include <linux/slab.h>
> @@ -91,7 +92,6 @@ struct rk_iommu {
> void __iomem **bases;
> int num_mmu;
> int *irq;
Nit: irq seems to be redundant now as well.
> - int num_irq;
> bool reset_disabled;
> struct iommu_device iommu;
> struct list_head node; /* entry in rk_iommu_domain.iommus */
> @@ -830,13 +830,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
>
> iommu->domain = domain;
>
> - for (i = 0; i < iommu->num_irq; i++) {
> - ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
> - IRQF_SHARED, dev_name(dev), iommu);
> - if (ret)
> - return ret;
> - }
> -
> for (i = 0; i < iommu->num_mmu; i++) {
> rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
> rk_domain->dt_dma);
> @@ -885,9 +878,6 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
> }
> rk_iommu_disable_stall(iommu);
>
> - for (i = 0; i < iommu->num_irq; i++)
> - devm_free_irq(iommu->dev, iommu->irq[i], iommu);
> -
> iommu->domain = NULL;
>
> dev_dbg(dev, "Detached from iommu domain\n");
> @@ -1138,7 +1128,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
> struct rk_iommu *iommu;
> struct resource *res;
> int num_res = pdev->num_resources;
> - int err, i;
> + int err, i, irq, num_irq;
>
> iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
> if (!iommu)
> @@ -1165,23 +1155,17 @@ static int rk_iommu_probe(struct platform_device *pdev)
> if (iommu->num_mmu == 0)
> return PTR_ERR(iommu->bases[0]);
>
> - iommu->num_irq = platform_irq_count(pdev);
> - if (iommu->num_irq < 0)
> - return iommu->num_irq;
> - if (iommu->num_irq == 0)
> - return -ENXIO;
> -
> - iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
> - GFP_KERNEL);
> - if (!iommu->irq)
> - return -ENOMEM;
> -
> - for (i = 0; i < iommu->num_irq; i++) {
> - iommu->irq[i] = platform_get_irq(pdev, i);
> - if (iommu->irq[i] < 0) {
> - dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
> + num_irq = of_irq_count(dev->of_node);
To follow up on the other reply, I'm not sure you really need to count
the IRQs beforehand at all - you're going to be looping through
platform_get_irq() and handling errors anyway, so you may as well just
start at 0 and keep going until -ENOENT (or use platform_get_resource()
to double-check whether an index should be valid, as we do in arm_smmu).
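The platform_get_resource() variant would look something like (untested):

        for (i = 0; platform_get_resource(pdev, IORESOURCE_IRQ, i); i++) {
                irq = platform_get_irq(pdev, i);
                if (irq < 0)
                        return irq;

                err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), iommu);
                if (err)
                        return err;
        }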
Otherwise, it looks like everything that the IRQ handler needs in the
iommu struct (dev, num_mmu and bases) is already initialised by this
point, so we should be OK with respect to races.
Robin.
> + for (i = 0; i < num_irq; i++) {
> + irq = platform_get_irq(pdev, i);
> + if (irq < 0) {
> + dev_err(dev, "Failed to get IRQ, %d\n", irq);
> return -ENXIO;
> }
> + err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
> + IRQF_SHARED, dev_name(dev), iommu);
> + if (err)
> + return err;
> }
>
> iommu->reset_disabled = device_property_read_bool(dev,
>
On Wed, Jan 17, 2018 at 4:19 PM, Tomasz Figa <[email protected]> wrote:
>
> P.S. Looks like your email client is set to HTML messages. Your
> messages might end up dropped from the mailing list.
Never mind. Looks like gmail started displaying quotations in plain
text as graphics.
Best regards,
Tomasz
On 16/01/18 13:25, Jeffy Chen wrote:
> IOMMU drivers are supposed to call this function instead of manually
> creating a group in their .add_device callback. This behavior is not
> strictly required by ARM DMA mapping implementation, but ARM64 already
> relies on it. This patch fixes the rockchip-iommu driver to comply with
> this requirement.
FWIW that's not 100% true: what arm64 relies on is the group having a
default DMA ops domain. Technically, you *could* open-code that in the
driver's group allocation, but obviously using the appropriate existing
API is nicer :)
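With the core helper, .add_device essentially reduces to something like this
(simplified sketch; the real patch presumably does a bit more):

static int rk_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!rk_iommu_from_dev(dev))
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}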
[...]
> @@ -1182,6 +1164,29 @@ static void rk_iommu_remove_device(struct device *dev)
> iommu_group_remove_device(dev);
> }
>
> +static struct iommu_group *rk_iommu_device_group(struct device *dev)
> +{
> + struct iommu_group *group;
> + int ret;
> +
> + group = iommu_group_get(dev);
> + if (!group) {
This check is pointless - if dev->iommu_group were non-NULL you wouldn't
have been called in the first place.
Robin.
> + group = iommu_group_alloc();
> + if (IS_ERR(group))
> + return group;
> + }
> +
> + ret = rk_iommu_group_set_iommudata(group, dev);
> + if (ret)
> + goto err_put_group;
> +
> + return group;
> +
> +err_put_group:
> + iommu_group_put(group);
> + return ERR_PTR(ret);
> +}
> +
> static const struct iommu_ops rk_iommu_ops = {
> .domain_alloc = rk_iommu_domain_alloc,
> .domain_free = rk_iommu_domain_free,
> @@ -1193,6 +1198,7 @@ static const struct iommu_ops rk_iommu_ops = {
> .add_device = rk_iommu_add_device,
> .remove_device = rk_iommu_remove_device,
> .iova_to_phys = rk_iommu_iova_to_phys,
> + .device_group = rk_iommu_device_group,
> .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
> };
>
>
Hi Robin,
On 01/17/2018 08:18 PM, Robin Murphy wrote:
>>
>> @@ -91,7 +92,6 @@ struct rk_iommu {
>> void __iomem **bases;
>> int num_mmu;
>> int *irq;
>
> Nit: irq seems to be redundant now as well.
oops, will fix it.
>
>> - int num_irq;
>> bool reset_disabled;
>> struct iommu_device iommu;
>> struct list_head node; /* entry in rk_iommu_domain.iommus */
>> @@ -830,13 +830,6 @@ static int rk_iommu_attach_device(struct
>> iommu_domain *domain,
>> iommu->domain = domain;
>> - for (i = 0; i < iommu->num_irq; i++) {
>> - ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
>> - IRQF_SHARED, dev_name(dev), iommu);
>> - if (ret)
>> - return ret;
>> - }
>> -
>> for (i = 0; i < iommu->num_mmu; i++) {
>> rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
>> rk_domain->dt_dma);
>> @@ -885,9 +878,6 @@ static void rk_iommu_detach_device(struct
>> iommu_domain *domain,
>> }
>> rk_iommu_disable_stall(iommu);
>> - for (i = 0; i < iommu->num_irq; i++)
>> - devm_free_irq(iommu->dev, iommu->irq[i], iommu);
>> -
>> iommu->domain = NULL;
>> dev_dbg(dev, "Detached from iommu domain\n");
>> @@ -1138,7 +1128,7 @@ static int rk_iommu_probe(struct platform_device
>> *pdev)
>> struct rk_iommu *iommu;
>> struct resource *res;
>> int num_res = pdev->num_resources;
>> - int err, i;
>> + int err, i, irq, num_irq;
>> iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
>> if (!iommu)
>> @@ -1165,23 +1155,17 @@ static int rk_iommu_probe(struct
>> platform_device *pdev)
>> if (iommu->num_mmu == 0)
>> return PTR_ERR(iommu->bases[0]);
>> - iommu->num_irq = platform_irq_count(pdev);
>> - if (iommu->num_irq < 0)
>> - return iommu->num_irq;
>> - if (iommu->num_irq == 0)
>> - return -ENXIO;
>> -
>> - iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
>> - GFP_KERNEL);
>> - if (!iommu->irq)
>> - return -ENOMEM;
>> -
>> - for (i = 0; i < iommu->num_irq; i++) {
>> - iommu->irq[i] = platform_get_irq(pdev, i);
>> - if (iommu->irq[i] < 0) {
>> - dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
>> + num_irq = of_irq_count(dev->of_node);
>
> To follow up on the other reply, I'm not sure you really need to count
> the IRQs beforehand at all - you're going to be looping through
> platform_get_irq() and handling errors anyway, so you may as well just
> start at 0 and keep going until -ENOENT (or use platform_get_resource()
> to double-check whether an index should be valid, as we do in arm_smmu).
ok, will do that.
>
> Otherwise, it looks like everything that the IRQ handler needs in the
> iommu struct (dev, num_mmu and bases) is already initialised by this
> point, so we should be OK with respect to races.
ok.
>
> Robin.
Hi Robin,
On 01/17/2018 08:31 PM, Robin Murphy wrote:
> On 16/01/18 13:25, Jeffy Chen wrote:
>> IOMMU drivers are supposed to call this function instead of manually
>> creating a group in their .add_device callback. This behavior is not
>> strictly required by ARM DMA mapping implementation, but ARM64 already
>> relies on it. This patch fixes the rockchip-iommu driver to comply with
>> this requirement.
>
> FWIW that's not 100% true: what arm64 relies on is the group having a
> default DMA ops domain. Technically, you *could* open-code that in the
> driver's group allocation, but obviously using the appropriate existing
> API is nicer :)
ok, will rewrite the commit message.
>
> [...]
>> @@ -1182,6 +1164,29 @@ static void rk_iommu_remove_device(struct
>> device *dev)
>> iommu_group_remove_device(dev);
>> }
>> +static struct iommu_group *rk_iommu_device_group(struct device *dev)
>> +{
>> + struct iommu_group *group;
>> + int ret;
>> +
>> + group = iommu_group_get(dev);
>> + if (!group) {
>
> This check is pointless - if dev->iommu_group were non-NULL you wouldn't
> have been called in the first place.
right, it's allocated in the probe.
>
> Robin.
On 01/17/2018 08:47 PM, JeffyChen wrote:
>>>
>>> +static struct iommu_group *rk_iommu_device_group(struct device *dev)
>>> +{
>>> + struct iommu_group *group;
>>> + int ret;
>>> +
>>> + group = iommu_group_get(dev);
>>> + if (!group) {
>>
>> This check is pointless - if dev->iommu_group were non-NULL you wouldn't
>> have been called in the first place.
> right, it's allocated in the probe.
oops, sorry, i see, this is not needed because device_group() is only
called when iommu_group_get() returns NULL.
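so the callback could shrink to something like (sketch based on the quoted
hunk):

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_alloc();
        if (IS_ERR(group))
                return group;

        ret = rk_iommu_group_set_iommudata(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}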
On 16/01/18 13:25, Jeffy Chen wrote:
> There would be some masters sharing the same IOMMU device. Put them in
> the same iommu group and share the same iommu domain.
>
> Signed-off-by: Jeffy Chen <[email protected]>
> ---
>
> Changes in v2: None
>
> drivers/iommu/rockchip-iommu.c | 39 +++++++++++++++++++++++++++++++--------
> 1 file changed, 31 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
> index c8de1456a016..6c316dd0dd2d 100644
> --- a/drivers/iommu/rockchip-iommu.c
> +++ b/drivers/iommu/rockchip-iommu.c
> @@ -100,11 +100,13 @@ struct rk_iommu {
> struct iommu_device iommu;
> struct list_head node; /* entry in rk_iommu_domain.iommus */
> struct iommu_domain *domain; /* domain to which iommu is attached */
> + struct iommu_group *group;
> struct mutex pm_mutex; /* serializes power transitions */
> };
>
> struct rk_iommudata {
> struct device_link *link; /* runtime PM link from IOMMU to master */
> + struct iommu_domain *domain; /* domain to which device is attached */
I don't see why this is needed - for example, mtk_iommu does the same
thing without needing to track the active domain in more than one place.
Fundamentally, for this kind of IOMMU without the notion of multiple
translation contexts, the logic should look like:
iommudrv_attach_device(dev, domain) {
        iommu = dev_get_iommu(dev);
        if (iommu->curr_domain != domain) {
                update_hw_state(iommu, domain);
                iommu->curr_domain = domain;
        }
}
which I think is essentially what you have anyway. When a group contains
multiple devices, you update the IOMMU state for the first device, then
calls for subsequent devices in the group do nothing since they see the
IOMMU state is already up-to-date with the new domain.
Robin.
> struct rk_iommu *iommu;
> };
>
> @@ -964,6 +966,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
> {
> struct rk_iommu *iommu;
> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
> + struct rk_iommudata *data = dev->archdata.iommu;
> unsigned long flags;
>
> /* Allow 'virtual devices' (eg drm) to detach from domain */
> @@ -971,6 +974,12 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
> if (!iommu)
> return;
>
> + /* device already detached */
> + if (data->domain != domain)
> + return;
> +
> + data->domain = NULL;
> +
> dev_dbg(dev, "Detaching from iommu domain\n");
>
> /* iommu already detached */
> @@ -994,6 +1003,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
> {
> struct rk_iommu *iommu;
> struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
> + struct rk_iommudata *data = dev->archdata.iommu;
> unsigned long flags;
> int ret;
>
> @@ -1005,15 +1015,21 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
> if (!iommu)
> return 0;
>
> + /* device already attached */
> + if (data->domain == domain)
> + return 0;
> +
> + if (data->domain)
> + rk_iommu_detach_device(data->domain, dev);
> +
> + data->domain = domain;
> +
> dev_dbg(dev, "Attaching to iommu domain\n");
>
> /* iommu already attached */
> if (iommu->domain == domain)
> return 0;
>
> - if (iommu->domain)
> - rk_iommu_detach_device(iommu->domain, dev);
> -
> iommu->domain = domain;
>
> spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> @@ -1150,13 +1166,11 @@ static void rk_iommu_remove_device(struct device *dev)
>
> static struct iommu_group *rk_iommu_device_group(struct device *dev)
> {
> - struct iommu_group *group;
> + struct rk_iommu *iommu;
>
> - group = iommu_group_get(dev);
> - if (!group)
> - group = iommu_group_alloc();
> + iommu = rk_iommu_from_dev(dev);
>
> - return group;
> + return iommu ? iommu->group : NULL;
> }
>
> static int rk_iommu_of_xlate(struct device *dev,
> @@ -1263,6 +1277,12 @@ static int rk_iommu_probe(struct platform_device *pdev)
> if (err)
> goto err_remove_sysfs;
>
> + iommu->group = iommu_group_alloc();
> + if (IS_ERR(iommu->group)) {
> + err = PTR_ERR(iommu->group);
> + goto err_unreg_iommu;
> + }
> +
> /*
> * Use the first registered IOMMU device for domain to use with DMA
> * API, since a domain might not physically correspond to a single
> @@ -1276,6 +1296,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
> pm_runtime_enable(dev);
>
> return 0;
> +err_unreg_iommu:
> + iommu_device_unregister(&iommu->iommu);
> err_remove_sysfs:
> iommu_device_sysfs_remove(&iommu->iommu);
> err_put_clocks:
> @@ -1289,6 +1311,7 @@ static int rk_iommu_remove(struct platform_device *pdev)
>
> pm_runtime_disable(&pdev->dev);
>
> + iommu_group_put(iommu->group);
> iommu_device_unregister(&iommu->iommu);
> iommu_device_sysfs_remove(&iommu->iommu);
> rk_iommu_put_clocks(iommu);
>
Hi Robin,
On 01/17/2018 09:00 PM, Robin Murphy wrote:
> On 16/01/18 13:25, Jeffy Chen wrote:
>> There would be some masters sharing the same IOMMU device. Put them in
>> the same iommu group and share the same iommu domain.
>>
>> Signed-off-by: Jeffy Chen <[email protected]>
>> ---
>>
>> Changes in v2: None
>>
>> drivers/iommu/rockchip-iommu.c | 39
>> +++++++++++++++++++++++++++++++--------
>> 1 file changed, 31 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/iommu/rockchip-iommu.c
>> b/drivers/iommu/rockchip-iommu.c
>> index c8de1456a016..6c316dd0dd2d 100644
>> --- a/drivers/iommu/rockchip-iommu.c
>> +++ b/drivers/iommu/rockchip-iommu.c
>> @@ -100,11 +100,13 @@ struct rk_iommu {
>> struct iommu_device iommu;
>> struct list_head node; /* entry in rk_iommu_domain.iommus */
>> struct iommu_domain *domain; /* domain to which iommu is
>> attached */
>> + struct iommu_group *group;
>> struct mutex pm_mutex; /* serializes power transitions */
>> };
>> struct rk_iommudata {
>> struct device_link *link; /* runtime PM link from IOMMU to
>> master */
>> + struct iommu_domain *domain; /* domain to which device is
>> attached */
>
> I don't see why this is needed - for example, mtk_iommu does the same
> thing without needing to track the active domain in more than one place.
>
> Fundamentally, for this kind of IOMMU without the notion of multiple
> translation contexts, the logic should look like:
>
> iommudrv_attach_device(dev, domain) {
>         iommu = dev_get_iommu(dev);
>         if (iommu->curr_domain != domain) {
>                 update_hw_state(iommu, domain);
>                 iommu->curr_domain = domain;
>         }
> }
>
> which I think is essentially what you have anyway. When a group contains
> multiple devices, you update the IOMMU state for the first device, then
> calls for subsequent devices in the group do nothing since they see the
> IOMMU state is already up-to-date with the new domain.
>
right, will remove it.
> Robin.