Hi all,
Thanks to Christoph's latest series, I'm reminded that, if we're going
to give the ARM DMA ops some cleanup this cycle, it's as good a time as
any to dust off these old patches and add them on top as well. I've
based these on the arm-dma-direct branch which I assume matches the
patches posted at [1].
Cheers,
Robin.
[1] https://lore.kernel.org/linux-arm-kernel/[email protected]/
Robin Murphy (3):
ARM/dma-mapping: Drop .dma_supported for IOMMU ops
ARM/dma-mapping: Consolidate IOMMU ops callbacks
ARM/dma-mapping: Merge IOMMU ops
arch/arm/mm/dma-mapping.c | 286 +++++++++-----------------------------
1 file changed, 62 insertions(+), 224 deletions(-)
--
2.35.3.dirty
On Thu, Apr 21, 2022 at 12:36:56PM +0100, Robin Murphy wrote:
> Hi all,
>
> Thanks to Christoph's latest series, I'm reminded that, if we're going
> to give the ARM DMA ops some cleanup this cycle, it's as good a time as
> any to dust off these old patches and add them on top as well. I've
> based these on the arm-dma-direct branch which I assume matches the
> patches posted at [1].
All these do look sensible to me. But weren't you working on replacing
the ARM iommu dma_ops with dma-iommu anyway?
The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. With some minor tweaks to make
those safe for coherent devices at minimal overhead, we can condense
down to a single set of DMA ops.
Signed-off-by: Robin Murphy <[email protected]>
---
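For reference, a minimal sketch of the shape the sync_single_for_cpu
path ends up with (a sketch of the hunks below, not new code; the
helpers are the ones already in arch/arm/mm/dma-mapping.c):

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	unsigned int offset = handle & ~PAGE_MASK;
	struct page *page;

	/* Coherent devices need no CPU cache maintenance at all */
	if (dev->dma_coherent || !iova)
		return;

	/* Only pay for the IOVA-to-phys lookup when a sync is really needed */
	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}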
arch/arm/mm/dma-mapping.c | 37 +++++++++++++------------------------
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 10e5e5800d78..dd46cce61579 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
}
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
.unmap_resource = arm_iommu_unmap_resource,
};
-static const struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
- .get_sgtable = arm_iommu_get_sgtable,
-
- .map_page = arm_iommu_map_page,
- .unmap_page = arm_iommu_unmap_page,
-
- .map_sg = arm_iommu_map_sg,
- .unmap_sg = arm_iommu_unmap_sg,
-
- .map_resource = arm_iommu_map_resource,
- .unmap_resource = arm_iommu_unmap_resource,
-};
-
/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return;
}
- if (coherent)
- set_dma_ops(dev, &iommu_coherent_ops);
- else
- set_dma_ops(dev, &iommu_ops);
+ set_dma_ops(dev, &iommu_ops);
}
static void arm_teardown_iommu_dma_ops(struct device *dev)
--
2.35.3.dirty
Merge the coherent and non-coherent callbacks down to a single
implementation each, relying on the generic dev->dma_coherent
flag at the points where the difference matters.
Signed-off-by: Robin Murphy <[email protected]>
---
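As a rough sketch of the consolidation (the real hunks follow; COHERENT,
NORMAL and the __dma_page_* helpers already exist in this file), the
merged callbacks key off dev->dma_coherent at the points where the two
variants used to differ:

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	/* Buffer attributes now come from the device, not from the ops */
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;

	/* ... allocation path otherwise unchanged ... */
}

static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Cache maintenance only for non-coherent devices */
	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	/* ... IOVA allocation and iommu_map() as before ... */
}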
arch/arm/mm/dma-mapping.c | 240 +++++++++-----------------------------
1 file changed, 56 insertions(+), 184 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b0095b84a58..10e5e5800d78 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1079,13 +1079,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
__free_from_pool(cpu_addr, size);
}
-static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
- int coherent_flag)
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
void *addr = NULL;
+ int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
*handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
@@ -1128,19 +1128,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
}
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
- return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
-}
-
-static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
- return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
-}
-
-static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1154,35 +1142,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
+ if (!dev->dma_coherent)
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
err = vm_map_pages(vma, pages, nr_pages);
if (err)
pr_err("Remapping memory failed: %d\n", err);
return err;
}
-static int arm_iommu_mmap_attrs(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
- return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-static int arm_coherent_iommu_mmap_attrs(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs, int coherent_flag)
+static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs)
{
+ int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
struct page **pages;
size = PAGE_ALIGN(size);
@@ -1204,19 +1181,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
__iommu_free_buffer(dev, pages, size, attrs);
}
-static void arm_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle,
- unsigned long attrs)
-{
- __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
-}
-
-static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
- __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
-}
-
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
@@ -1236,8 +1200,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, unsigned long attrs,
- bool is_coherent)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova, iova_base;
@@ -1257,7 +1220,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_info_to_prot(dir, attrs);
@@ -1277,9 +1240,20 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
return ret;
}
-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs,
- bool is_coherent)
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
int i, count = 0, ret;
@@ -1294,8 +1268,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
ret = __map_sg_chunk(dev, start, size,
- &dma->dma_address, dir, attrs,
- is_coherent);
+ &dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
@@ -1309,8 +1282,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
size += s->length;
}
- ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
- is_coherent);
+ ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
@@ -1327,76 +1299,6 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return -EINVAL;
}
-/**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
-}
-
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs, bool is_coherent)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- if (sg_dma_len(s))
- __iommu_remove_mapping(dev, sg_dma_address(s),
- sg_dma_len(s));
- if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-}
-
-/**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_coherent_iommu_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
/**
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer
@@ -1412,7 +1314,17 @@ static void arm_iommu_unmap_sg(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
}
/**
@@ -1452,18 +1364,17 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
-
/**
- * arm_coherent_iommu_map_page
+ * arm_iommu_map_page
* @dev: valid struct device pointer
* @page: page that buffer resides in
* @offset: offset into page for start of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
- * Coherent IOMMU aware version of arm_dma_map_page()
+ * IOMMU aware version of arm_dma_map_page()
*/
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -1471,6 +1382,9 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
dma_addr_t dma_addr;
int ret, prot, len = PAGE_ALIGN(size + offset);
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
@@ -1487,50 +1401,6 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
return DMA_MAPPING_ERROR;
}
-/**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_page_cpu_to_dev(page, offset, size, dir);
-
- return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
- dma_addr_t iova = handle & PAGE_MASK;
- int offset = handle & ~PAGE_MASK;
- int len = PAGE_ALIGN(size + offset);
-
- if (!iova)
- return;
-
- iommu_unmap(mapping->domain, iova, len);
- __free_iova(mapping, iova, len);
-}
-
/**
* arm_iommu_unmap_page
* @dev: valid struct device pointer
@@ -1545,15 +1415,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
int offset = handle & ~PAGE_MASK;
int len = PAGE_ALIGN(size + offset);
if (!iova)
return;
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
+ }
iommu_unmap(mapping->domain, iova, len);
__free_iova(mapping, iova, len);
@@ -1665,16 +1537,16 @@ static const struct dma_map_ops iommu_ops = {
};
static const struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_coherent_iommu_alloc_attrs,
- .free = arm_coherent_iommu_free_attrs,
- .mmap = arm_coherent_iommu_mmap_attrs,
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
- .map_page = arm_coherent_iommu_map_page,
- .unmap_page = arm_coherent_iommu_unmap_page,
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
- .map_sg = arm_coherent_iommu_map_sg,
- .unmap_sg = arm_coherent_iommu_unmap_sg,
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
--
2.35.3.dirty
On Tue, Aug 30, 2022 at 04:36:19PM +0100, Robin Murphy wrote:
> On 2022-08-30 16:19, Yongqin Liu wrote:
> > Hi, Robin
> >
> > Thanks for the kind reply!
> >
> > On Tue, 30 Aug 2022 at 17:48, Robin Murphy <[email protected]> wrote:
> > >
> > > On 2022-08-27 13:24, Yongqin Liu wrote:
> > > > Hi, Robin, Christoph
> > > >
> > > > With the changes landed in the mainline kernel,
> > > > one problem is exposed with our out of tree pvr module.
> > > > Like the source here[1], arm_dma_ops.sync_single_for_cpu is called in
> > > > the format like the following:
> > > > arm_dma_ops.sync_single_for_cpu(NULL, pStart, pEnd - pStart,
> > > > DMA_FROM_DEVICE);
> > > >
> > > > Not sure if you could give some suggestions on what I should do next
> > > > to make the pvr module work again.
> > >
> > > Wow, that driver reinvents so many standard APIs for no apparent reason
> > > it's not even funny.
> > >
> > > Anyway, from a brief look it seemingly already knows how to call the DMA
> > > API semi-correctly, so WTF that's doing behind an #ifdef, who knows?
> > > However it's still so completely wrong in general - fundamentally broken
> > > AArch64 set/way cache maintenance!? - that it looks largely beyond help.
> > > "Throw CONFIG_DMA_API_DEBUG at it and cry" is about the extent of
> > > support I'm prepared to provide for that mess.
> >
> > For the moment, I do not care about the AArch64 lines, like if we only
> > say the following two lines:
> > arm_dma_ops.sync_single_for_device(NULL, pStart, pEnd - pStart,
> > DMA_TO_DEVICE);
> > arm_dma_ops.sync_single_for_cpu(NULL, pStart, pEnd - pStart,
> > DMA_FROM_DEVICE);
> >
> > Could you please give some suggestions for that?
>
> Remove them. Then remove the #ifdef __arch64__ too, since the code under
> there is doing a passable impression of generic DMA API usage, as long as
> one ignores the bigger picture.
>
> arm64 already uses dma-direct. To say you don't care about the arm64 code
> when asking how to deal with ARM having now been converted to use dma-direct
> as well is supremely missing the point.
It should also be pointed out that this culture of bodging around the
DMA API by graphics drivers is entirely their own problem. If they used
the proper interfaces that the kernel provides (like the DMA API) rather
than thinking "I need to flush the caches in such-and-such a way here"
so I need to find a function somewhere in the kernel's interfaces that
I can get to in order to achieve that, and I don't care how that's done
then maybe their code wouldn't keep breaking.
This is really not our problem to solve.
This is not limited to just PVR. I've seen it with other stuff as well,
and it's the reason I was not in favour of the dmac_* functions that we
have in arch/arm/mm - which are part of the DMA API implementation -
being exposed by moving them into a header file. One can see from PVR
that they also made use of these before I intentionally hid them from
driver modules.
Basically, out of tree graphics drivers will bodge around to get access
to the specific cache management that they want to use - even if it
means abusing stuff that may mean that their crappy drivers break when
we make later changes.
As I say, it's entirely _their_ problem to solve if they don't want to
use our official interfaces. Or, they can decide to use our official
interfaces, which would be nice.
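For reference, a sketch of what the official streaming interface looks
like for the pattern that driver is open-coding (dev, buf and len here
are placeholders, not taken from the PVR code):

#include <linux/dma-mapping.h>

/* CPU has filled 'buf'; let the device work on it, then read it back */
static int example_dma_round_trip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and wait for the transfer ... */

	/* Hand ownership back to the CPU before touching the buffer */
	dma_sync_single_for_cpu(dev, dma, len, DMA_BIDIRECTIONAL);
	/* ... CPU inspects or updates buf ... */

	/* And back to the device before any further DMA on the same buffer */
	dma_sync_single_for_device(dev, dma, len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
	return 0;
}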
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!