iommu/ipmmu-vmsa: IPMMU multi-arch update V2
[PATCH v2 01/04] iommu/ipmmu-vmsa: Remove platform data handling
[PATCH v2 02/04] iommu/ipmmu-vmsa: Rework interrupt code and use bitmap for context
[PATCH v2 03/04] iommu/ipmmu-vmsa: Break out 32-bit ARM mapping code
[PATCH v2 04/04] iommu/ipmmu-vmsa: Drop LPAE Kconfig dependency
These patches update the IPMMU driver with a few minor changes
to support build on multiple architectures.
With these patches applied the driver is known to compile without issues
on 32-bit ARM, 64-bit ARM and x86_64.
Changes since V1:
- Got rid of patch 2 and 3 from initial series
- Updated bitmap code locking and also used lighter bitop functions
- Updated the Kconfig bits to apply on top of ARCH_RENESAS
Signed-off-by: Magnus Damm <[email protected]>
---
Built on top of next-20160314
drivers/iommu/Kconfig | 1
drivers/iommu/ipmmu-vmsa.c | 146 +++++++++++++++++++++++++++++---------------
2 files changed, 97 insertions(+), 50 deletions(-)
From: Magnus Damm <[email protected]>
The IPMMU driver is using DT these days, and platform data is no longer
used by the driver. Remove unused code.
Signed-off-by: Magnus Damm <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
---
Changes since V1:
- Added Reviewed-by from Laurent
drivers/iommu/ipmmu-vmsa.c | 5 -----
1 file changed, 5 deletions(-)
--- 0001/drivers/iommu/ipmmu-vmsa.c
+++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 10:59:25.590513000 +0900
@@ -766,11 +766,6 @@ static int ipmmu_probe(struct platform_d
int irq;
int ret;
- if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
- dev_err(&pdev->dev, "missing platform data\n");
- return -EINVAL;
- }
-
mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
if (!mmu) {
dev_err(&pdev->dev, "cannot allocate device data\n");
From: Magnus Damm <[email protected]>
Introduce a bitmap for context handling and convert the
interrupt routine to handle all registered contexts.
At this point the number of contexts is still limited.
Also remove the use of the ARM specific mapping variable
from ipmmu_irq() to allow compile on ARM64.
Signed-off-by: Magnus Damm <[email protected]>
---
Changes since V1: (Thanks to Laurent for feedback!)
- Use simple find_first_zero_bit()/set_bit()/clear_bit() for context management.
- For allocation rely on spinlock held when calling ipmmu_domain_init_context()
- For test/free use atomic bitops
- Return IRQ_HANDLED if any of the contexts generated interrupts
drivers/iommu/ipmmu-vmsa.c | 47 ++++++++++++++++++++++++++++++++------------
1 file changed, 35 insertions(+), 12 deletions(-)
--- 0003/drivers/iommu/ipmmu-vmsa.c
+++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 12:42:18.940513000 +0900
@@ -8,6 +8,7 @@
* the Free Software Foundation; version 2 of the License.
*/
+#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -26,12 +27,16 @@
#include "io-pgtable.h"
+#define IPMMU_CTX_MAX 1
+
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct list_head list;
unsigned int num_utlbs;
+ DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
+ struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
struct dma_iommu_mapping *mapping;
};
@@ -296,6 +301,7 @@ static struct iommu_gather_ops ipmmu_gat
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
u64 ttbr;
+ int ret;
/*
* Allocate the page table operations.
@@ -325,10 +331,17 @@ static int ipmmu_domain_init_context(str
return -EINVAL;
/*
- * TODO: When adding support for multiple contexts, find an unused
- * context.
+ * Find an unused context.
*/
- domain->context_id = 0;
+ ret = find_first_zero_bit(domain->mmu->ctx, IPMMU_CTX_MAX);
+ if (ret == IPMMU_CTX_MAX) {
+ free_io_pgtable_ops(domain->iop);
+ return -EBUSY;
+ }
+
+ domain->context_id = ret;
+ domain->mmu->domains[ret] = domain;
+ set_bit(ret, domain->mmu->ctx);
/* TTBR0 */
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
@@ -372,6 +385,8 @@ static int ipmmu_domain_init_context(str
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
+ clear_bit(domain->context_id, domain->mmu->ctx);
+
/*
* Disable the context. Flush the TLB as required when modifying the
* context registers.
@@ -389,10 +404,15 @@ static void ipmmu_domain_destroy_context
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
- struct ipmmu_vmsa_device *mmu = domain->mmu;
+ struct ipmmu_vmsa_device *mmu;
u32 status;
u32 iova;
+ if (!domain)
+ return IRQ_NONE;
+
+ mmu = domain->mmu;
+
status = ipmmu_ctx_read(domain, IMSTR);
if (!(status & err_mask))
return IRQ_NONE;
@@ -437,16 +457,18 @@ static irqreturn_t ipmmu_domain_irq(stru
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
struct ipmmu_vmsa_device *mmu = dev;
- struct iommu_domain *io_domain;
- struct ipmmu_vmsa_domain *domain;
-
- if (!mmu->mapping)
- return IRQ_NONE;
+ irqreturn_t status = IRQ_NONE;
+ unsigned int i;
- io_domain = mmu->mapping->domain;
- domain = to_vmsa_domain(io_domain);
+ /* Check interrupts for all active contexts */
+ for (i = 0; i < IPMMU_CTX_MAX; i++) {
+ if (!test_bit(i, mmu->ctx))
+ continue;
+ if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
+ status = IRQ_HANDLED;
+ }
- return ipmmu_domain_irq(domain);
+ return status;
}
/* -----------------------------------------------------------------------------
@@ -774,6 +796,7 @@ static int ipmmu_probe(struct platform_d
mmu->dev = &pdev->dev;
mmu->num_utlbs = 32;
+ bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
From: Magnus Damm <[email protected]>
Make the driver compile on more than just 32-bit ARM
by breaking out and wrapping ARM specific functions
in #ifdefs. Not pretty, but needed to be able to use
the driver on other architectures like ARM64.
Signed-off-by: Magnus Damm <[email protected]>
---
Changes since V1:
- Rebased to work without patch 2 and 3 from V1 series
drivers/iommu/ipmmu-vmsa.c | 94 +++++++++++++++++++++++++++++---------------
1 file changed, 62 insertions(+), 32 deletions(-)
--- 0004/drivers/iommu/ipmmu-vmsa.c
+++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 12:25:45.040513000 +0900
@@ -22,8 +22,10 @@
#include <linux/sizes.h>
#include <linux/slab.h>
+#ifdef CONFIG_ARM
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#endif
#include "io-pgtable.h"
@@ -38,7 +40,9 @@ struct ipmmu_vmsa_device {
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+#ifdef CONFIG_ARM
struct dma_iommu_mapping *mapping;
+#endif
};
struct ipmmu_vmsa_domain {
@@ -615,6 +619,60 @@ static int ipmmu_find_utlbs(struct ipmmu
return 0;
}
+#ifdef CONFIG_ARM
+static int ipmmu_map_attach(struct device *dev, struct ipmmu_vmsa_device *mmu)
+{
+ int ret;
+
+ /*
+ * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ * VAs. This will allocate a corresponding IOMMU domain.
+ *
+ * TODO:
+ * - Create one mapping per context (TLB).
+ * - Make the mapping size configurable ? We currently use a 2GB mapping
+ * at a 1GB offset to ensure that NULL VAs will fault.
+ */
+ if (!mmu->mapping) {
+ struct dma_iommu_mapping *mapping;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
+ return PTR_ERR(mapping);
+ }
+
+ mmu->mapping = mapping;
+ }
+
+ /* Attach the ARM VA mapping to the device. */
+ ret = arm_iommu_attach_device(dev, mmu->mapping);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach device to VA mapping\n");
+ arm_iommu_release_mapping(mmu->mapping);
+ }
+
+ return ret;
+}
+static inline void ipmmu_detach(struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu)
+{
+ arm_iommu_release_mapping(mmu->mapping);
+}
+#else
+static inline int ipmmu_map_attach(struct device *dev,
+ struct ipmmu_vmsa_device *mmu)
+{
+ return 0;
+}
+static inline void ipmmu_detach(struct device *dev) {}
+static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu) {}
+#endif
+
static int ipmmu_add_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata;
@@ -695,41 +753,13 @@ static int ipmmu_add_device(struct devic
archdata->num_utlbs = num_utlbs;
dev->archdata.iommu = archdata;
- /*
- * Create the ARM mapping, used by the ARM DMA mapping core to allocate
- * VAs. This will allocate a corresponding IOMMU domain.
- *
- * TODO:
- * - Create one mapping per context (TLB).
- * - Make the mapping size configurable ? We currently use a 2GB mapping
- * at a 1GB offset to ensure that NULL VAs will fault.
- */
- if (!mmu->mapping) {
- struct dma_iommu_mapping *mapping;
-
- mapping = arm_iommu_create_mapping(&platform_bus_type,
- SZ_1G, SZ_2G);
- if (IS_ERR(mapping)) {
- dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
- ret = PTR_ERR(mapping);
- goto error;
- }
-
- mmu->mapping = mapping;
- }
-
- /* Attach the ARM VA mapping to the device. */
- ret = arm_iommu_attach_device(dev, mmu->mapping);
- if (ret < 0) {
- dev_err(dev, "Failed to attach device to VA mapping\n");
+ ret = ipmmu_map_attach(dev, mmu);
+ if (ret < 0)
goto error;
- }
return 0;
error:
- arm_iommu_release_mapping(mmu->mapping);
-
kfree(dev->archdata.iommu);
kfree(utlbs);
@@ -745,7 +775,7 @@ static void ipmmu_remove_device(struct d
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
- arm_iommu_detach_device(dev);
+ ipmmu_detach(dev);
iommu_group_remove_device(dev);
kfree(archdata->utlbs);
@@ -856,7 +886,7 @@ static int ipmmu_remove(struct platform_
list_del(&mmu->list);
spin_unlock(&ipmmu_devices_lock);
- arm_iommu_release_mapping(mmu->mapping);
+ ipmmu_release_mapping(mmu);
ipmmu_device_reset(mmu);
From: Magnus Damm <[email protected]>
Neither the ARM page table code enabled by IOMMU_IO_PGTABLE_LPAE
nor the IPMMU_VMSA driver actually depends on ARM_LPAE, so get
rid of the dependency.
Tested with ipmmu-vmsa on r8a7794 ALT and a kernel config using:
# CONFIG_ARM_LPAE is not set
Signed-off-by: Magnus Damm <[email protected]>
Acked-by: Laurent Pinchart <[email protected]>
---
Changes since V1:
- Rebased on top of ARCH_RENESAS change
- Added Acked-by from Laurent
This time the result also compiles on x86. Need to be
applied as last patch in the following series:
[PATCH v2 00/04] iommu/ipmmu-vmsa: IPMMU multi-arch update V2
drivers/iommu/Kconfig | 1 -
1 file changed, 1 deletion(-)
--- 0001/drivers/iommu/Kconfig
+++ work/drivers/iommu/Kconfig 2016-03-15 12:28:45.210513000 +0900
@@ -284,7 +284,6 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
- depends on ARM_LPAE
depends on ARCH_RENESAS || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
Hi Magnus,
Thank you for the patch.
On Tuesday 15 March 2016 13:22:04 Magnus Damm wrote:
> From: Magnus Damm <[email protected]>
>
> Make the driver compile on more than just 32-bit ARM
> by breaking out and wrapping ARM specific functions
> in #ifdefs. Not pretty, but needed to be able to use
> the driver on other architectures like ARM64.
Given that the callers of the new ARM-specific functions will, after patch
"[PATCH 04/04] iommu/ipmmu-vmsa: Add new IOMMU_DOMAIN_DMA ops", be used only
on !CONFIG_IOMMU_DMA, and that ARM64 selects CONFIG_IOMMU_DMA, wouldn't it
make more sense not to extract the ARM-specific code in separate functions but
compile-out the whole ipmmu_ops structure and all the related operation
handlers when !CONFIG_IOMMU_DMA ?
I would order the patches as follows.
[PATCH v2 01/04] iommu/ipmmu-vmsa: Remove platform data handling
[PATCH v2 02/04] iommu/ipmmu-vmsa: Rework interrupt code and use bitmap for
context
[PATCH v2 04/04] iommu/ipmmu-vmsa: Drop LPAE Kconfig dependency
[PATCH 02/04] iommu/ipmmu-vmsa: Break out utlb parsing code
[PATCH 03/04] iommu/ipmmu-vmsa: Break out domain allocation code
and then squash the following two patches, with conditional compilation for
ipmmu_ops.
[PATCH 01/04] iommu/ipmmu-vmsa: 32-bit ARM may have CONFIG_IOMMU_DMA=y
[PATCH 04/04] iommu/ipmmu-vmsa: Add new IOMMU_DOMAIN_DMA ops
I think the result will be cleaner.
Please see below for a couple of other comments.
> Signed-off-by: Magnus Damm <[email protected]>
> ---
>
> Changes since V1:
> - Rebased to work without patch 2 and 3 from V1 series
>
> drivers/iommu/ipmmu-vmsa.c | 94 +++++++++++++++++++++++++++-------------
> 1 file changed, 62 insertions(+), 32 deletions(-)
>
> --- 0004/drivers/iommu/ipmmu-vmsa.c
> +++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 12:25:45.040513000 +0900
> @@ -22,8 +22,10 @@
> #include <linux/sizes.h>
> #include <linux/slab.h>
>
> +#ifdef CONFIG_ARM
> #include <asm/dma-iommu.h>
> #include <asm/pgalloc.h>
> +#endif
>
> #include "io-pgtable.h"
>
> @@ -38,7 +40,9 @@ struct ipmmu_vmsa_device {
> DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
> struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
>
> +#ifdef CONFIG_ARM
> struct dma_iommu_mapping *mapping;
> +#endif
> };
>
> struct ipmmu_vmsa_domain {
> @@ -615,6 +619,60 @@ static int ipmmu_find_utlbs(struct ipmmu
> return 0;
> }
>
> +#ifdef CONFIG_ARM
> +static int ipmmu_map_attach(struct device *dev, struct ipmmu_vmsa_device
> *mmu)
> +{
> + int ret;
> +
> + /*
> + * Create the ARM mapping, used by the ARM DMA mapping core to allocate
> + * VAs. This will allocate a corresponding IOMMU domain.
> + *
> + * TODO:
> + * - Create one mapping per context (TLB).
> + * - Make the mapping size configurable ? We currently use a 2GB mapping
> + * at a 1GB offset to ensure that NULL VAs will fault.
> + */
> + if (!mmu->mapping) {
> + struct dma_iommu_mapping *mapping;
> +
> + mapping = arm_iommu_create_mapping(&platform_bus_type,
> + SZ_1G, SZ_2G);
> + if (IS_ERR(mapping)) {
> + dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
> + return PTR_ERR(mapping);
> + }
> +
> + mmu->mapping = mapping;
> + }
> +
> + /* Attach the ARM VA mapping to the device. */
> + ret = arm_iommu_attach_device(dev, mmu->mapping);
> + if (ret < 0) {
> + dev_err(dev, "Failed to attach device to VA mapping\n");
> + arm_iommu_release_mapping(mmu->mapping);
> + }
> +
> + return ret;
> +}
How about adding a blank line here ?
> +static inline void ipmmu_detach(struct device *dev)
> +{
> + arm_iommu_detach_device(dev);
> +}
And another one here ?
> +static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu)
> +{
> + arm_iommu_release_mapping(mmu->mapping);
> +}
> +#else
> +static inline int ipmmu_map_attach(struct device *dev,
> + struct ipmmu_vmsa_device *mmu)
> +{
> + return 0;
> +}
> +static inline void ipmmu_detach(struct device *dev) {}
> +static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu) {}
The compiler should be smart enough to inline all these functions if needed.
> +#endif
> +
> static int ipmmu_add_device(struct device *dev)
> {
> struct ipmmu_vmsa_archdata *archdata;
> @@ -695,41 +753,13 @@ static int ipmmu_add_device(struct devic
> archdata->num_utlbs = num_utlbs;
> dev->archdata.iommu = archdata;
>
> - /*
> - * Create the ARM mapping, used by the ARM DMA mapping core to allocate
> - * VAs. This will allocate a corresponding IOMMU domain.
> - *
> - * TODO:
> - * - Create one mapping per context (TLB).
> - * - Make the mapping size configurable ? We currently use a 2GB mapping
> - * at a 1GB offset to ensure that NULL VAs will fault.
> - */
> - if (!mmu->mapping) {
> - struct dma_iommu_mapping *mapping;
> -
> - mapping = arm_iommu_create_mapping(&platform_bus_type,
> - SZ_1G, SZ_2G);
> - if (IS_ERR(mapping)) {
> - dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
> - ret = PTR_ERR(mapping);
> - goto error;
> - }
> -
> - mmu->mapping = mapping;
> - }
> -
> - /* Attach the ARM VA mapping to the device. */
> - ret = arm_iommu_attach_device(dev, mmu->mapping);
> - if (ret < 0) {
> - dev_err(dev, "Failed to attach device to VA mapping\n");
> + ret = ipmmu_map_attach(dev, mmu);
> + if (ret < 0)
> goto error;
> - }
>
> return 0;
>
> error:
> - arm_iommu_release_mapping(mmu->mapping);
> -
> kfree(dev->archdata.iommu);
> kfree(utlbs);
>
> @@ -745,7 +775,7 @@ static void ipmmu_remove_device(struct d
> {
> struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
>
> - arm_iommu_detach_device(dev);
> + ipmmu_detach(dev);
> iommu_group_remove_device(dev);
>
> kfree(archdata->utlbs);
> @@ -856,7 +886,7 @@ static int ipmmu_remove(struct platform_
> list_del(&mmu->list);
> spin_unlock(&ipmmu_devices_lock);
>
> - arm_iommu_release_mapping(mmu->mapping);
> + ipmmu_release_mapping(mmu);
>
> ipmmu_device_reset(mmu);
--
Regards,
Laurent Pinchart
Hi Magnus,
Thank you for the patch.
On Tuesday 15 March 2016 13:21:55 Magnus Damm wrote:
> From: Magnus Damm <[email protected]>
>
> Introduce a bitmap for context handing and convert the
> interrupt routine to go handle all registered contexts.
>
> At this point the number of contexts are still limited.
>
> Also remove the use of the ARM specific mapping variable
> from ipmmu_irq() to allow compile on ARM64.
>
> Signed-off-by: Magnus Damm <[email protected]>
> ---
>
> Changes since V1: (Thanks to Laurent for feedback!)
> - Use simple find_first_zero()/set_bit()/clear_bit() for context
> management.
> - For allocation rely on spinlock held when calling
> ipmmu_domain_init_context()
I'm afraid this is still racy. That spinlock belongs to the domain, and we
have multiple domains. You need to add a new lock in the ipmmu_vmsa_device
structure.
> - For test/free use atomic bitops
> - Return IRQ_HANDLED if any of the contexts generated interrupts
>
> drivers/iommu/ipmmu-vmsa.c | 47 +++++++++++++++++++++++++++++------------
> 1 file changed, 35 insertions(+), 12 deletions(-)
>
> --- 0003/drivers/iommu/ipmmu-vmsa.c
> +++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 12:42:18.940513000 +0900
> @@ -8,6 +8,7 @@
> * the Free Software Foundation; version 2 of the License.
> */
>
> +#include <linux/bitmap.h>
> #include <linux/delay.h>
> #include <linux/dma-mapping.h>
> #include <linux/err.h>
> @@ -26,12 +27,16 @@
>
> #include "io-pgtable.h"
>
> +#define IPMMU_CTX_MAX 1
> +
> struct ipmmu_vmsa_device {
> struct device *dev;
> void __iomem *base;
> struct list_head list;
>
> unsigned int num_utlbs;
> + DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
> + struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
>
> struct dma_iommu_mapping *mapping;
> };
> @@ -296,6 +301,7 @@ static struct iommu_gather_ops ipmmu_gat
> static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
> {
> u64 ttbr;
> + int ret;
>
> /*
> * Allocate the page table operations.
> @@ -325,10 +331,17 @@ static int ipmmu_domain_init_context(str
> return -EINVAL;
>
> /*
> - * TODO: When adding support for multiple contexts, find an unused
> - * context.
> + * Find an unused context.
> */
> - domain->context_id = 0;
> + ret = find_first_zero_bit(domain->mmu->ctx, IPMMU_CTX_MAX);
> + if (ret == IPMMU_CTX_MAX) {
> + free_io_pgtable_ops(domain->iop);
> + return -EBUSY;
> + }
> +
> + domain->context_id = ret;
> + domain->mmu->domains[ret] = domain;
> + set_bit(ret, domain->mmu->ctx);
>
> /* TTBR0 */
> ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
> @@ -372,6 +385,8 @@ static int ipmmu_domain_init_context(str
>
> static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
> {
> + clear_bit(domain->context_id, domain->mmu->ctx);
> +
> /*
> * Disable the context. Flush the TLB as required when modifying the
> * context registers.
> @@ -389,10 +404,15 @@ static void ipmmu_domain_destroy_context
> static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
> {
> const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
> - struct ipmmu_vmsa_device *mmu = domain->mmu;
> + struct ipmmu_vmsa_device *mmu;
> u32 status;
> u32 iova;
>
> + if (!domain)
> + return IRQ_NONE;
Can this happen, as you test for the corresponding context bit before calling
this function ?
> +
> + mmu = domain->mmu;
> +
> status = ipmmu_ctx_read(domain, IMSTR);
> if (!(status & err_mask))
> return IRQ_NONE;
> @@ -437,16 +457,18 @@ static irqreturn_t ipmmu_domain_irq(stru
> static irqreturn_t ipmmu_irq(int irq, void *dev)
> {
> struct ipmmu_vmsa_device *mmu = dev;
> - struct iommu_domain *io_domain;
> - struct ipmmu_vmsa_domain *domain;
> -
> - if (!mmu->mapping)
> - return IRQ_NONE;
> + irqreturn_t status = IRQ_NONE;
> + unsigned int i;
>
> - io_domain = mmu->mapping->domain;
> - domain = to_vmsa_domain(io_domain);
> + /* Check interrupts for all active contexts */
Nitpicking, could you add a period at the end of the sentence to match the
existing comment style ?
> + for (i = 0; i < IPMMU_CTX_MAX; i++) {
> + if (!test_bit(i, mmu->ctx))
test_bit() isn't atomic. Let's use explicit locking in every location where
the contexts bitmap is accessed in a racy way.
> + continue;
> + if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
> + status = IRQ_HANDLED;
> + }
>
> - return ipmmu_domain_irq(domain);
> + return status;
> }
>
> /* ------------------------------------------------------------------------
> @@ -774,6 +796,7 @@ static int ipmmu_probe(struct platform_d
>
> mmu->dev = &pdev->dev;
> mmu->num_utlbs = 32;
> + bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
>
> /* Map I/O memory and request IRQ. */
> res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
--
Regards,
Laurent Pinchart
Hi Magnus,
On 15/03/16 04:22, Magnus Damm wrote:
> From: Magnus Damm <[email protected]>
>
> Make the driver compile on more than just 32-bit ARM
> by breaking out and wrapping ARM specific functions
> in #ifdefs. Not pretty, but needed to be able to use
> the driver on other architectures like ARM64.
>
> Signed-off-by: Magnus Damm <[email protected]>
> ---
>
> Changes since V1:
> - Rebased to work without patch 2 and 3 from V1 series
>
> drivers/iommu/ipmmu-vmsa.c | 94 +++++++++++++++++++++++++++++---------------
> 1 file changed, 62 insertions(+), 32 deletions(-)
>
> --- 0004/drivers/iommu/ipmmu-vmsa.c
> +++ work/drivers/iommu/ipmmu-vmsa.c 2016-03-15 12:25:45.040513000 +0900
> @@ -22,8 +22,10 @@
> #include <linux/sizes.h>
> #include <linux/slab.h>
>
> +#ifdef CONFIG_ARM
> #include <asm/dma-iommu.h>
> #include <asm/pgalloc.h>
> +#endif
>
> #include "io-pgtable.h"
>
> @@ -38,7 +40,9 @@ struct ipmmu_vmsa_device {
> DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
> struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
>
> +#ifdef CONFIG_ARM
> struct dma_iommu_mapping *mapping;
> +#endif
> };
>
> struct ipmmu_vmsa_domain {
> @@ -615,6 +619,60 @@ static int ipmmu_find_utlbs(struct ipmmu
> return 0;
> }
>
> +#ifdef CONFIG_ARM
> +static int ipmmu_map_attach(struct device *dev, struct ipmmu_vmsa_device *mmu)
> +{
> + int ret;
> +
> + /*
> + * Create the ARM mapping, used by the ARM DMA mapping core to allocate
> + * VAs. This will allocate a corresponding IOMMU domain.
> + *
> + * TODO:
> + * - Create one mapping per context (TLB).
> + * - Make the mapping size configurable ? We currently use a 2GB mapping
> + * at a 1GB offset to ensure that NULL VAs will fault.
> + */
> + if (!mmu->mapping) {
> + struct dma_iommu_mapping *mapping;
> +
> + mapping = arm_iommu_create_mapping(&platform_bus_type,
> + SZ_1G, SZ_2G);
> + if (IS_ERR(mapping)) {
> + dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
> + return PTR_ERR(mapping);
> + }
> +
> + mmu->mapping = mapping;
> + }
> +
> + /* Attach the ARM VA mapping to the device. */
> + ret = arm_iommu_attach_device(dev, mmu->mapping);
> + if (ret < 0) {
> + dev_err(dev, "Failed to attach device to VA mapping\n");
> + arm_iommu_release_mapping(mmu->mapping);
> + }
> +
> + return ret;
> +}
This looks an awful lot like what the IOMMU core code now does
automatically via the (relatively new) default domain mechanism. I
suspect things might end up a fair bit simpler if you create the ARM
mapping in domain_alloc when asked for an IOMMU_DOMAIN_DMA domain. While
you're still using a single context, sticking all the client devices in
the same group will then keep everything together automatically (see
mtk_iommu.c in -next for an example) to retain the existing behaviour.
Since those mechanisms are all architecture-independent, that should
help minimise the mess when accommodating arm64 later.
Robin.
> +static inline void ipmmu_detach(struct device *dev)
> +{
> + arm_iommu_detach_device(dev);
> +}
> +static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu)
> +{
> + arm_iommu_release_mapping(mmu->mapping);
> +}
> +#else
> +static inline int ipmmu_map_attach(struct device *dev,
> + struct ipmmu_vmsa_device *mmu)
> +{
> + return 0;
> +}
> +static inline void ipmmu_detach(struct device *dev) {}
> +static inline void ipmmu_release_mapping(struct ipmmu_vmsa_device *mmu) {}
> +#endif
> +
> static int ipmmu_add_device(struct device *dev)
> {
> struct ipmmu_vmsa_archdata *archdata;
> @@ -695,41 +753,13 @@ static int ipmmu_add_device(struct devic
> archdata->num_utlbs = num_utlbs;
> dev->archdata.iommu = archdata;
>
> - /*
> - * Create the ARM mapping, used by the ARM DMA mapping core to allocate
> - * VAs. This will allocate a corresponding IOMMU domain.
> - *
> - * TODO:
> - * - Create one mapping per context (TLB).
> - * - Make the mapping size configurable ? We currently use a 2GB mapping
> - * at a 1GB offset to ensure that NULL VAs will fault.
> - */
> - if (!mmu->mapping) {
> - struct dma_iommu_mapping *mapping;
> -
> - mapping = arm_iommu_create_mapping(&platform_bus_type,
> - SZ_1G, SZ_2G);
> - if (IS_ERR(mapping)) {
> - dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
> - ret = PTR_ERR(mapping);
> - goto error;
> - }
> -
> - mmu->mapping = mapping;
> - }
> -
> - /* Attach the ARM VA mapping to the device. */
> - ret = arm_iommu_attach_device(dev, mmu->mapping);
> - if (ret < 0) {
> - dev_err(dev, "Failed to attach device to VA mapping\n");
> + ret = ipmmu_map_attach(dev, mmu);
> + if (ret < 0)
> goto error;
> - }
>
> return 0;
>
> error:
> - arm_iommu_release_mapping(mmu->mapping);
> -
> kfree(dev->archdata.iommu);
> kfree(utlbs);
>
> @@ -745,7 +775,7 @@ static void ipmmu_remove_device(struct d
> {
> struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
>
> - arm_iommu_detach_device(dev);
> + ipmmu_detach(dev);
> iommu_group_remove_device(dev);
>
> kfree(archdata->utlbs);
> @@ -856,7 +886,7 @@ static int ipmmu_remove(struct platform_
> list_del(&mmu->list);
> spin_unlock(&ipmmu_devices_lock);
>
> - arm_iommu_release_mapping(mmu->mapping);
> + ipmmu_release_mapping(mmu);
>
> ipmmu_device_reset(mmu);
>
> _______________________________________________
> iommu mailing list
> [email protected]
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
>