Wouldn't the small patch below solve the same issue in a simple way?
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 640a7e63c453..e8f8447d705b 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -290,9 +290,11 @@ EXPORT_SYMBOL(dma_mmap_from_coherent);
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
struct dma_coherent_mem *mem = rmem->priv;
+ dma_addr_t dev_addr = ((rmem->base >> PAGE_SHIFT) -
+ dev->dma_pfn_offset) << PAGE_SHIFT;
if (!mem &&
- !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+ !dma_init_coherent_memory(rmem->base, dev_addr, rmem->size,
DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
&mem)) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
On 20/06/17 14:42, Christoph Hellwig wrote:
> Wouldn't the smal patch below solve the same issue in a simple way?
That assumes that all devices accessing a shared pool will have the same
dma_pfn_offset as the first one, which cannot strictly be relied upon
(even if it is highly likely in practice).
Robin.
> diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
> index 640a7e63c453..e8f8447d705b 100644
> --- a/drivers/base/dma-coherent.c
> +++ b/drivers/base/dma-coherent.c
> @@ -290,9 +290,11 @@ EXPORT_SYMBOL(dma_mmap_from_coherent);
> static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
> {
> struct dma_coherent_mem *mem = rmem->priv;
> + dma_addr_t dev_addr = ((rmem->base >> PAGE_SHIFT) -
> + dev->dma_pfn_offset) << PAGE_SHIFT;
>
> if (!mem &&
> - !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
> + !dma_init_coherent_memory(rmem->base, dev_addr, rmem->size,
> DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
> &mem)) {
> pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
>
On 20/06/17 14:42, Christoph Hellwig wrote:
> Wouldn't the smal patch below solve the same issue in a simple way?
>
> diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
> index 640a7e63c453..e8f8447d705b 100644
> --- a/drivers/base/dma-coherent.c
> +++ b/drivers/base/dma-coherent.c
> @@ -290,9 +290,11 @@ EXPORT_SYMBOL(dma_mmap_from_coherent);
> static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
> {
> struct dma_coherent_mem *mem = rmem->priv;
> + dma_addr_t dev_addr = ((rmem->base >> PAGE_SHIFT) -
> + dev->dma_pfn_offset) << PAGE_SHIFT;
>
> if (!mem &&
> - !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
> + !dma_init_coherent_memory(rmem->base, dev_addr, rmem->size,
> DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
> &mem)) {
> pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
>
Robin has already commented on this - dma_pfn_offset may vary across devices,
and a reserved memory region may be shared between them.
Cheers
Vladimir
On Tue, Jun 20, 2017 at 02:50:34PM +0100, Robin Murphy wrote:
> On 20/06/17 14:42, Christoph Hellwig wrote:
> > Wouldn't the smal patch below solve the same issue in a simple way?
>
> That assumes that all devices accessing a shared pool will have the same
> dma_pfn_offset as the first one, which cannot strictly be relied upon
> (even if it is highly likely in practice).
Which seems like another argument for why the global pool should be
a different implementation / interface than the per-device DMA
mapping helpers.