Hi Michal,
can you take a look at this patch that moves microblaze over to the
generic DMA remap allocator? I've been trying to slowly get all
architectures over to the generic code, and microblaze is one that
seems very straightforward to convert.
Stop providing our own arch alloc/free hooks for nommu platforms and
just expose the segment offset and use the generic dma-direct
allocator.
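For context, here is a minimal sketch (not the actual upstream code) of how
the generic dma-direct allocator is expected to consume these hooks on !MMU:
after allocating pages it flushes any cached aliases with
arch_dma_prep_coherent() and then returns the alias translated into the
uncached mirror via uncached_kernel_address(). The function name and some
details below are made up for illustration; the real logic lives in
kernel/dma/direct.c.

/*
 * Illustrative sketch only: a simplified version of the generic
 * dma-direct allocation path for the !MMU uncached-segment case.
 * dma_direct_alloc_sketch() is a made-up name; the real code in
 * kernel/dma/direct.c handles more cases (zones, CMA, encryption).
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/mm.h>

static void *dma_direct_alloc_sketch(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = alloc_pages(gfp | __GFP_ZERO, get_order(PAGE_ALIGN(size)));
	if (!page)
		return NULL;

	ret = page_address(page);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    !dev_is_dma_coherent(dev)) {
		/* write back/invalidate any cachelines covering the buffer */
		arch_dma_prep_coherent(page, size);
		/* hand out the alias in the uncached shadow region */
		ret = uncached_kernel_address(ret);
	}

	/* the DMA address is still the real physical address */
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}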
Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/microblaze/Kconfig | 2 +
arch/microblaze/mm/consistent.c | 93 +++++++++++++++------------------
2 files changed, 43 insertions(+), 52 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d411de05b628..a0d749c309f3 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -5,9 +5,11 @@ config MICROBLAZE
select ARCH_NO_SWAP
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_UNCACHED_SEGMENT if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index bc7042209c57..1a859e8b58c2 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -42,21 +42,48 @@
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
-#ifndef CONFIG_MMU
-/* I have to use dcache values because I can't relate on ram size */
-# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#endif
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ phys_addr_t paddr = page_to_phys(page);
+
+ flush_dcache_range(paddr, paddr + size);
+}
+#ifndef CONFIG_MMU
/*
- * Consistent memory allocators. Used for DMA devices that want to
- * share uncached memory with the processor core.
- * My crufty no-MMU approach is simple. In the HW platform we can optionally
- * mirror the DDR up above the processor cacheable region. So, memory accessed
- * in this mirror region will not be cached. It's alloced from the same
- * pool as normal memory, but the handle we return is shifted up into the
- * uncached region. This will no doubt cause big problems if memory allocated
- * here is not also freed properly. -- JW
+ * Consistent memory allocators. Used for DMA devices that want to share
+ * uncached memory with the processor core. My crufty no-MMU approach is
+ * simple. In the HW platform we can optionally mirror the DDR up above the
+ * processor cacheable region. So, memory accessed in this mirror region will
+ * not be cached. It's alloced from the same pool as normal memory, but the
+ * handle we return is shifted up into the uncached region. This will no doubt
+ * cause big problems if memory allocated here is not also freed properly. -- JW
+ *
+ * I have to use dcache values because I can't relate on ram size:
*/
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+#else
+#define UNCACHED_SHADOW_MASK 0
+#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
+
+void *uncached_kernel_address(void *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+
+ addr |= UNCACHED_SHADOW_MASK;
+ if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
+ pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
+ return (void *)addr;
+}
+
+void *cached_kernel_address(void *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+
+ return (void *)(addr & ~UNCACHED_SHADOW_MASK);
+}
+#else /* CONFIG_MMU */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
@@ -64,12 +91,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void *ret;
unsigned int i, err = 0;
struct page *page, *end;
-
-#ifdef CONFIG_MMU
phys_addr_t pa;
struct vm_struct *area;
unsigned long va;
-#endif
if (in_interrupt())
BUG();
@@ -86,26 +110,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* we need to ensure that there are no cachelines in use,
* or worse dirty in this area.
*/
- flush_dcache_range(virt_to_phys((void *)vaddr),
- virt_to_phys((void *)vaddr) + size);
-
-#ifndef CONFIG_MMU
- ret = (void *)vaddr;
- /*
- * Here's the magic! Note if the uncached shadow is not implemented,
- * it's up to the calling code to also test that condition and make
- * other arranegments, such as manually flushing the cache and so on.
- */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
-# endif
- if ((unsigned int)ret > cpuinfo.dcache_base &&
- (unsigned int)ret < cpuinfo.dcache_high)
- pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
+ arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size);
- /* dma_handle is same as physical (shadowed) address */
- *dma_handle = (dma_addr_t)ret;
-#else
/* Allocate some common virtual space to map the new pages. */
area = get_vm_area(size, VM_ALLOC);
if (!area) {
@@ -117,7 +123,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* This gives us the real physical address of the first page. */
*dma_handle = pa = __virt_to_phys(vaddr);
-#endif
/*
* free wasted pages. We skip the first page since we know
@@ -131,10 +136,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
split_page(page, order);
for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
-#ifdef CONFIG_MMU
/* MS: This is the whole magic - use cache inhibit pages */
err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
-#endif
SetPageReserved(page);
page++;
@@ -154,7 +157,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
return ret;
}
-#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
{
unsigned long addr = (unsigned long)vaddr;
@@ -172,7 +174,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
return pte_pfn(*ptep);
}
-#endif
/*
* free page(s) as defined by the above mapping.
@@ -187,18 +188,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
-#ifndef CONFIG_MMU
- /* Clear SHADOW_MASK bit in address, and free as per usual */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
-# endif
- page = virt_to_page(vaddr);
-
- do {
- __free_reserved_page(page);
- page++;
- } while (size -= PAGE_SIZE);
-#else
do {
pte_t *ptep = consistent_virt_to_pte(vaddr);
unsigned long pfn;
@@ -216,5 +205,5 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
/* flush tlb */
flush_tlb_all();
-#endif
}
+#endif /* CONFIG_MMU */
--
2.20.1
This switches to using common code for the DMA allocations, including
potential use of the CMA allocator if configured.
Switching to the generic code enables DMA allocations from atomic
context, which is required by the DMA API documentation, and also
adds various other minor features that drivers have started relying on.
It also makes sure we have one tested code base for all architectures
that require uncached pte bits for coherent DMA allocations.
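To illustrate what microblaze now relies on, here is a minimal sketch of the
generic remap allocation path (simplified; the real implementation is in
kernel/dma/remap.c and also handles CMA, error unwinding and per-device
pgprot selection). Atomic callers are served from the pre-remapped pool that
the new atomic_pool_init() below populates; blocking callers get freshly
allocated pages flushed with arch_dma_prep_coherent() and remapped uncached.
The function name and details below are illustrative only.

/*
 * Illustrative sketch only: roughly what the generic DMA remap
 * allocator does on MMU configurations.  The real code lives in
 * kernel/dma/remap.c; names and error handling are simplified here.
 */
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/vmalloc.h>

static void *generic_remap_alloc_sketch(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(gfp)) {
		/* atomic context: carve the buffer out of the uncached pool */
		ret = dma_alloc_from_pool(size, &page, gfp);
		if (!ret)
			return NULL;
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return ret;
	}

	page = alloc_pages(gfp | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	/* make sure no cachelines cover the buffer before remapping it */
	arch_dma_prep_coherent(page, size);

	/*
	 * Remap the pages uncached; pgprot_noncached(PAGE_KERNEL) matches
	 * what atomic_pool_init() below passes for microblaze, while the
	 * real code derives the pgprot from the device and attrs.
	 */
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
			pgprot_noncached(PAGE_KERNEL),
			__builtin_return_address(0));
	if (!ret)
		__free_pages(page, get_order(size));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}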
Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/microblaze/Kconfig | 1 +
arch/microblaze/mm/consistent.c | 152 +-------------------------------
2 files changed, 5 insertions(+), 148 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a0d749c309f3..e477896fbae6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -17,6 +17,7 @@ config MICROBLAZE
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
+ select DMA_DIRECT_REMAP if MMU
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 1a859e8b58c2..0e0f733eb846 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -4,43 +4,16 @@
* Copyright (C) 2010 Michal Simek <[email protected]>
* Copyright (C) 2010 PetaLogix
* Copyright (C) 2005 John Williams <[email protected]>
- *
- * Based on PowerPC version derived from arch/arm/mm/consistent.c
- * Copyright (C) 2001 Dan Malek ([email protected])
- * Copyright (C) 2000 Russell King
*/
-#include <linux/export.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/memblock.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>
-
-#include <asm/pgalloc.h>
-#include <linux/io.h>
-#include <linux/hardirq.h>
-#include <linux/mmu_context.h>
-#include <asm/mmu.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
void arch_dma_prep_coherent(struct page *page, size_t size)
{
@@ -84,126 +57,9 @@ void *cached_kernel_address(void *ptr)
return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
#else /* CONFIG_MMU */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- unsigned long order, vaddr;
- void *ret;
- unsigned int i, err = 0;
- struct page *page, *end;
- phys_addr_t pa;
- struct vm_struct *area;
- unsigned long va;
-
- if (in_interrupt())
- BUG();
-
- /* Only allocate page size areas. */
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
- if (!vaddr)
- return NULL;
-
- /*
- * we need to ensure that there are no cachelines in use,
- * or worse dirty in this area.
- */
- arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size);
-
- /* Allocate some common virtual space to map the new pages. */
- area = get_vm_area(size, VM_ALLOC);
- if (!area) {
- free_pages(vaddr, order);
- return NULL;
- }
- va = (unsigned long) area->addr;
- ret = (void *)va;
-
- /* This gives us the real physical address of the first page. */
- *dma_handle = pa = __virt_to_phys(vaddr);
-
- /*
- * free wasted pages. We skip the first page since we know
- * that it will have count = 1 and won't require freeing.
- * We also mark the pages in use as reserved so that
- * remap_page_range works.
- */
- page = virt_to_page(vaddr);
- end = page + (1 << order);
-
- split_page(page, order);
-
- for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
- /* MS: This is the whole magic - use cache inhibit pages */
- err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
-
- SetPageReserved(page);
- page++;
- }
-
- /* Free the otherwise unused pages. */
- while (page < end) {
- __free_page(page);
- page++;
- }
-
- if (err) {
- free_pages(vaddr, order);
- return NULL;
- }
-
- return ret;
-}
-
-static pte_t *consistent_virt_to_pte(void *vaddr)
-{
- unsigned long addr = (unsigned long)vaddr;
-
- return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
- dma_addr_t dma_addr)
+static int __init atomic_pool_init(void)
{
- pte_t *ptep = consistent_virt_to_pte(vaddr);
-
- if (pte_none(*ptep) || !pte_present(*ptep))
- return 0;
-
- return pte_pfn(*ptep);
-}
-
-/*
- * free page(s) as defined by the above mapping.
- */
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- struct page *page;
-
- if (in_interrupt())
- BUG();
-
- size = PAGE_ALIGN(size);
-
- do {
- pte_t *ptep = consistent_virt_to_pte(vaddr);
- unsigned long pfn;
-
- if (!pte_none(*ptep) && pte_present(*ptep)) {
- pfn = pte_pfn(*ptep);
- pte_clear(&init_mm, (unsigned int)vaddr, ptep);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- __free_reserved_page(page);
- }
- }
- vaddr += PAGE_SIZE;
- } while (size -= PAGE_SIZE);
-
- /* flush tlb */
- flush_tlb_all();
+ return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
+postcore_initcall(atomic_pool_init);
#endif /* CONFIG_MMU */
--
2.20.1
On Wed, Aug 14, 2019 at 04:03:46PM +0200, Christoph Hellwig wrote:
> Hi Michal,
>
> can you take a look at this patch that moves microblaze over to the
> generic DMA remap allocator? I've been trying to slowly get all
> architectures over to the generic code, and microblaze is one that
> seems very straightforward to convert.
Michal, any chance you could look over this series?
Hi,
On 26. 08. 19 9:02, Christoph Hellwig wrote:
> On Wed, Aug 14, 2019 at 04:03:46PM +0200, Christoph Hellwig wrote:
>> Hi Michal,
>>
>> can you take a look at this patch that moves microblaze over to the
>> generic DMA remap allocator? I've been trying to slowly get all
>> architectures over to the generic code, and microblaze is one that
>> seems very straightforward to convert.
>
> Michal, any chance you could look over this series?
>
let me take a look.
M
--
Michal Simek, Ing. (M.Eng), OpenPGP -> KeyID: FE3D1F91
w: http://www.monstr.eu p: +42-0-721842854
Maintainer of Linux kernel - Xilinx Microblaze
Maintainer of Linux kernel - Xilinx Zynq ARM and ZynqMP ARM64 SoCs
U-Boot custodian - Xilinx Microblaze/Zynq/ZynqMP/Versal SoCs
Hi Christoph,
On 14. 08. 19 16:03, Christoph Hellwig wrote:
> Hi Michal,
>
> can you take a look at this patch that moves microblaze over to the
> generic DMA remap allocator? I've been trying to slowly get all
> architectures over to the generic code, and microblaze is one that
> seems very straightforward to convert.
>
I took a look at this series and tested it on kc705, and I can't see any
issues.
Patches applied.
Thanks,
Michal
--
Michal Simek, Ing. (M.Eng), OpenPGP -> KeyID: FE3D1F91
w: http://www.monstr.eu p: +42-0-721842854
Maintainer of Linux kernel - Xilinx Microblaze
Maintainer of Linux kernel - Xilinx Zynq ARM and ZynqMP ARM64 SoCs
U-Boot custodian - Xilinx Microblaze/Zynq/ZynqMP/Versal SoCs