Currently, each architecture has its own dma_get_cache_alignment
implementation. dma_get_cache_alignment returns the minimum DMA
alignment. Architectures define it as ARCH_KMALLOC_MINALIGN (which is
used to make sure that a kmalloc'ed buffer is DMA-safe; the buffer
doesn't share a cache line with other objects). So we can unify the
dma_get_cache_alignment implementations.
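The unified implementation, added to include/linux/dma-mapping.h by
the last patch in this series, is simply:

	static inline int dma_get_cache_alignment(void)
	{
	#ifdef ARCH_DMA_MINALIGN
		return ARCH_DMA_MINALIGN;
	#endif
		return 1;
	}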
This can be applied to -mm.
---
arch/alpha/include/asm/dma-mapping.h | 1 -
arch/arm/include/asm/cache.h | 2 +-
arch/arm/include/asm/dma-mapping.h | 5 -----
arch/avr32/include/asm/cache.h | 2 +-
arch/avr32/include/asm/dma-mapping.h | 5 -----
arch/blackfin/include/asm/cache.h | 2 +-
arch/blackfin/include/asm/dma-mapping.h | 1 -
arch/cris/include/asm/dma-mapping.h | 6 ------
arch/frv/include/asm/dma-mapping.h | 6 ------
arch/frv/include/asm/mem-layout.h | 2 +-
arch/ia64/include/asm/dma-mapping.h | 2 --
arch/ia64/kernel/setup.c | 6 ------
arch/m68k/include/asm/cache.h | 2 +-
arch/m68k/include/asm/dma-mapping.h | 5 -----
arch/microblaze/include/asm/dma-mapping.h | 5 -----
arch/microblaze/include/asm/page.h | 2 +-
arch/mips/include/asm/dma-mapping.h | 7 -------
arch/mips/include/asm/mach-generic/kmalloc.h | 2 +-
arch/mips/include/asm/mach-ip27/kmalloc.h | 2 +-
arch/mips/include/asm/mach-ip32/kmalloc.h | 4 ++--
arch/mips/include/asm/mach-tx49xx/kmalloc.h | 2 +-
arch/mn10300/include/asm/cache.h | 2 +-
arch/mn10300/include/asm/dma-mapping.h | 6 ------
arch/parisc/include/asm/cache.h | 2 ++
arch/parisc/include/asm/dma-mapping.h | 6 ------
arch/powerpc/include/asm/dma-mapping.h | 15 ---------------
arch/powerpc/include/asm/page_32.h | 2 +-
arch/sh/include/asm/dma-mapping.h | 9 ---------
arch/sh/include/asm/page.h | 2 +-
arch/sparc/include/asm/dma-mapping.h | 9 ---------
arch/tile/include/asm/dma-mapping.h | 7 -------
arch/x86/include/asm/dma-mapping.h | 7 -------
arch/xtensa/include/asm/cache.h | 2 +-
arch/xtensa/include/asm/dma-mapping.h | 6 ------
include/linux/dma-mapping.h | 8 ++++++++
include/linux/slab_def.h | 4 +++-
include/linux/slob_def.h | 4 +++-
include/linux/slub_def.h | 8 +++++---
38 files changed, 36 insertions(+), 134 deletions(-)
dma_get_cache_alignment() needs to know whether an architecture
defines ARCH_KMALLOC_MINALIGN or not (that is, whether the
architecture has a DMA alignment restriction). However, slab.h defines
ARCH_KMALLOC_MINALIGN itself when an architecture doesn't define it.
Let's rename ARCH_KMALLOC_MINALIGN to
ARCH_DMA_MINALIGN. ARCH_KMALLOC_MINALIGN is used only in the internals
of slab/slob/slub (except for crypto).
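With the rename, each slab allocator header picks the kmalloc
alignment like this (slab_def.h and slub_def.h shown; slob_def.h uses
__alignof__(unsigned long) as its default):

	#ifdef ARCH_DMA_MINALIGN
	#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
	#else
	#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
	#endif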
Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/arm/include/asm/cache.h | 2 +-
arch/avr32/include/asm/cache.h | 2 +-
arch/blackfin/include/asm/cache.h | 2 +-
arch/frv/include/asm/mem-layout.h | 2 +-
arch/m68k/include/asm/cache.h | 2 +-
arch/microblaze/include/asm/page.h | 2 +-
arch/mips/include/asm/mach-generic/kmalloc.h | 2 +-
arch/mips/include/asm/mach-ip27/kmalloc.h | 2 +-
arch/mips/include/asm/mach-ip32/kmalloc.h | 4 ++--
arch/mips/include/asm/mach-tx49xx/kmalloc.h | 2 +-
arch/mn10300/include/asm/cache.h | 2 +-
arch/powerpc/include/asm/page_32.h | 2 +-
arch/sh/include/asm/page.h | 2 +-
arch/xtensa/include/asm/cache.h | 2 +-
include/linux/slab_def.h | 4 +++-
include/linux/slob_def.h | 4 +++-
include/linux/slub_def.h | 8 +++++---
17 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 66c160b..9d61220 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -14,7 +14,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
index d3cf35a..c3a58a1 100644
--- a/arch/avr32/include/asm/cache.h
+++ b/arch/avr32/include/asm/cache.h
@@ -11,7 +11,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifndef __ASSEMBLER__
struct cache_info {
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 93f6c63..bd0641a 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -15,7 +15,7 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SMP
#define __cacheline_aligned
diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h
index ccae981..e9a0ec8 100644
--- a/arch/frv/include/asm/mem-layout.h
+++ b/arch/frv/include/asm/mem-layout.h
@@ -35,7 +35,7 @@
* the slab must be aligned such that load- and store-double instructions don't
* fault if used
*/
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
/*****************************************************************************/
diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
index ecafbe1..0395c51 100644
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,6 +8,6 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index c12c6df..2b108db 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -40,7 +40,7 @@
#ifndef __ASSEMBLY__
/* MS be sure that SLAB allocates aligned objects */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
index b8e6deb..a5d6690 100644
--- a/arch/mips/include/asm/mach-generic/kmalloc.h
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -7,7 +7,7 @@
* Total overkill for most systems but need as a safe default.
* Set this one if any device in the system might do non-coherent DMA.
*/
-#define ARCH_KMALLOC_MINALIGN 128
+#define ARCH_DMA_MINALIGN 128
#endif
#endif /* __ASM_MACH_GENERIC_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-ip27/kmalloc.h b/arch/mips/include/asm/mach-ip27/kmalloc.h
index 426bd04..82c23ce 100644
--- a/arch/mips/include/asm/mach-ip27/kmalloc.h
+++ b/arch/mips/include/asm/mach-ip27/kmalloc.h
@@ -2,7 +2,7 @@
#define __ASM_MACH_IP27_KMALLOC_H
/*
- * All happy, no need to define ARCH_KMALLOC_MINALIGN
+ * All happy, no need to define ARCH_DMA_MINALIGN
*/
#endif /* __ASM_MACH_IP27_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-ip32/kmalloc.h b/arch/mips/include/asm/mach-ip32/kmalloc.h
index b1e0be6..042ca92 100644
--- a/arch/mips/include/asm/mach-ip32/kmalloc.h
+++ b/arch/mips/include/asm/mach-ip32/kmalloc.h
@@ -3,9 +3,9 @@
#if defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_RM7000)
-#define ARCH_KMALLOC_MINALIGN 32
+#define ARCH_DMA_MINALIGN 32
#else
-#define ARCH_KMALLOC_MINALIGN 128
+#define ARCH_DMA_MINALIGN 128
#endif
#endif /* __ASM_MACH_IP32_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
index 913ff19..6ac7f1f 100644
--- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h
+++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
@@ -2,7 +2,7 @@
#define __ASM_MACH_TX49XX_KMALLOC_H
/*
- * All happy, no need to define ARCH_KMALLOC_MINALIGN
+ * All happy, no need to define ARCH_DMA_MINALIGN
*/
#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 6e2fe28..781bf61 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,7 +21,7 @@
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index bd0849d..68d73b2 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -10,7 +10,7 @@
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
#ifdef CONFIG_NOT_COHERENT_CACHE
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif
#ifdef CONFIG_PTE_64BIT
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index fb703d1..c4e0b3d 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -180,7 +180,7 @@ typedef struct page *pgtable_t;
* Some drivers need to perform DMA into kmalloc'ed buffers
* and so we have to increase the kmalloc minalign for this.
*/
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SUPERH64
/*
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index ed8cd3c..d2fd932 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,6 +29,6 @@
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* _XTENSA_CACHE_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1acfa73..791a502 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -17,7 +17,6 @@
#include <trace/events/kmem.h>
-#ifndef ARCH_KMALLOC_MINALIGN
/*
* Enforce a minimum alignment for the kmalloc caches.
* Usually, the kmalloc caches are cache_line_size() aligned, except when
@@ -27,6 +26,9 @@
* ARCH_KMALLOC_MINALIGN allows that.
* Note that increasing this value may disable some debug features.
*/
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 62667f7..4382db0 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,7 +1,9 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6447a72..6d14409 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -106,15 +106,17 @@ struct kmem_cache {
/*
* Kmalloc subsystem.
*/
-#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
--
1.6.5
Architectures that handle DMA-non-coherent memory need to set
ARCH_DMA_MINALIGN to make sure that kmalloc'ed buffers are DMA-safe:
a buffer must not share a cache line with other objects.
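To illustrate why, here is a hypothetical driver fragment (the device,
size, and DMA direction are made up): DMA into a kmalloc'ed buffer
invalidates the buffer's cache lines, so if the buffer shared a line
with an unrelated object, that object's data would be destroyed too.

	/* illustrative only */
	char *buf = kmalloc(64, GFP_KERNEL);	/* ARCH_DMA_MINALIGN keeps buf on its own cache line(s) */
	dma_addr_t dma = dma_map_single(dev, buf, 64, DMA_FROM_DEVICE);
	/* ... device writes into buf via DMA ... */
	dma_unmap_single(dev, dma, 64, DMA_FROM_DEVICE); /* invalidates buf's cache lines */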
Signed-off-by: FUJITA Tomonori <[email protected]>
Cc: Kyle McMartin <[email protected]>
Cc: Helge Deller <[email protected]>
Cc: James E.J. Bottomley <[email protected]>
---
arch/parisc/include/asm/cache.h | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 45effe6..039880e 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -28,6 +28,8 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
void parisc_cache_init(void); /* initializes cache-flushing */
--
1.6.5
dma_get_cache_alignment returns the minimum DMA
alignment. Architectures define it as ARCH_DMA_MINALIGN (formerly
ARCH_KMALLOC_MINALIGN). So we can unify the dma_get_cache_alignment
implementations.
Note that some architectures implement dma_get_cache_alignment
incorrectly: dma_get_cache_alignment() should return the minimum DMA
alignment, so fully coherent architectures should return 1. This patch
also fixes that.
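With that, a portable caller can use the return value directly; for
example (an illustrative fragment, not from this series), a driver can
round a buffer size up so the buffer occupies whole cache lines on
non-coherent systems, while fully coherent systems pay nothing since
the alignment is now 1:

	size = ALIGN(size, dma_get_cache_alignment());	/* no-op when alignment is 1 */
	buf = kmalloc(size, GFP_KERNEL);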
Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/alpha/include/asm/dma-mapping.h | 1 -
arch/arm/include/asm/dma-mapping.h | 5 -----
arch/avr32/include/asm/dma-mapping.h | 5 -----
arch/blackfin/include/asm/dma-mapping.h | 1 -
arch/cris/include/asm/dma-mapping.h | 6 ------
arch/frv/include/asm/dma-mapping.h | 6 ------
arch/ia64/include/asm/dma-mapping.h | 2 --
arch/ia64/kernel/setup.c | 6 ------
arch/m68k/include/asm/dma-mapping.h | 5 -----
arch/microblaze/include/asm/dma-mapping.h | 5 -----
arch/mips/include/asm/dma-mapping.h | 7 -------
arch/mn10300/include/asm/dma-mapping.h | 6 ------
arch/parisc/include/asm/dma-mapping.h | 6 ------
arch/powerpc/include/asm/dma-mapping.h | 15 ---------------
arch/sh/include/asm/dma-mapping.h | 9 ---------
arch/sparc/include/asm/dma-mapping.h | 9 ---------
arch/tile/include/asm/dma-mapping.h | 7 -------
arch/x86/include/asm/dma-mapping.h | 7 -------
arch/xtensa/include/asm/dma-mapping.h | 6 ------
include/linux/dma-mapping.h | 8 ++++++++
20 files changed, 8 insertions(+), 114 deletions(-)
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 1bce816..b3423d9 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -44,6 +44,5 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
#define dma_is_consistent(d, h) (1)
#define dma_cache_sync(dev, va, size, dir) ((void)0)
-#define dma_get_cache_alignment() L1_CACHE_BYTES
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 69ce072..f4a996d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -144,11 +144,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-static inline int dma_get_cache_alignment(void)
-{
- return 32;
-}
-
static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
return !!arch_is_coherent();
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index 0399359..af6b816 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -341,9 +341,4 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
return 1;
}
-static inline int dma_get_cache_alignment(void)
-{
- return boot_cpu_data.dcache.linesz;
-}
-
#endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 212cb80..6694a0f 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -21,7 +21,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m) (1)
-#define dma_get_cache_alignment() (32)
#define dma_is_consistent(d, h) (1)
static inline int
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index da8ef8e..fc30fd0 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -152,12 +152,6 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline int
-dma_get_cache_alignment(void)
-{
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
#define dma_is_consistent(d, h) (1)
static inline void
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 6af5d83..7b05ce1 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -125,12 +125,6 @@ int dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline
-int dma_get_cache_alignment(void)
-{
- return 1 << L1_CACHE_SHIFT;
-}
-
#define dma_is_consistent(d, h) (1)
static inline
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 7d09a09..8d52dee 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -86,8 +86,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
-extern int dma_get_cache_alignment(void);
-
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 41ae6a5..8fb958a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -98,12 +98,6 @@ static struct resource bss_resource = {
unsigned long ia64_max_cacheline_size;
-int dma_get_cache_alignment(void)
-{
- return ia64_max_cacheline_size;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 26f5054..a1ae732 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -16,11 +16,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline int dma_get_cache_alignment(void)
-{
- return 1 << L1_CACHE_SHIFT;
-}
-
static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 0;
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 18b3731..e9f7c81 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -138,11 +138,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
-static inline int dma_get_cache_alignment(void)
-{
- return L1_CACHE_BYTES;
-}
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 664ba53..d724a15 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -62,13 +62,6 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline int
-dma_get_cache_alignment(void)
-{
- /* XXX Largest on any MIPS */
- return 128;
-}
-
extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index 4ed1522..8d452a6 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -161,12 +161,6 @@ int dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline
-int dma_get_cache_alignment(void)
-{
- return 1 << L1_CACHE_SHIFT;
-}
-
#define dma_is_consistent(d) (1)
static inline
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index da69433..44d3f62 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -185,12 +185,6 @@ dma_set_mask(struct device *dev, u64 mask)
}
static inline int
-dma_get_cache_alignment(void)
-{
- return dcache_stride;
-}
-
-static inline int
dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c85ef23..a77ba28 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -215,21 +215,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_is_consistent(d, h) (1)
#endif
-static inline int dma_get_cache_alignment(void)
-{
-#ifdef CONFIG_PPC64
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << INTERNODE_CACHE_SHIFT);
-#else
- /*
- * Each processor family will define its own L1_CACHE_SHIFT,
- * L1_CACHE_BYTES wraps to this, so this is always safe.
- */
- return L1_CACHE_BYTES;
-#endif
-}
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index bea3337..6bb5cc9 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -48,15 +48,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
#define dma_is_consistent(d, h) (0)
#endif
-static inline int dma_get_cache_alignment(void)
-{
- /*
- * Each processor family will define its own L1_CACHE_SHIFT,
- * L1_CACHE_BYTES wraps to this, so this is always safe.
- */
- return L1_CACHE_BYTES;
-}
-
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 4b4a0c0..74db853 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -52,15 +52,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return (dma_addr == DMA_ERROR_CODE);
}
-static inline int dma_get_cache_alignment(void)
-{
- /*
- * no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe
- */
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
static inline int dma_set_mask(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index cf466b3..1326b91 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -90,13 +90,6 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline int
-dma_get_cache_alignment(void)
-{
- return L2_CACHE_BYTES;
-}
-
#define dma_is_consistent(d, h) (1)
-
#endif /* _ASM_TILE_DMA_MAPPING_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ac91eed..f9c67e8 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -87,13 +87,6 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
-static inline int dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all x86, so return the
- * maximum possible, to be safe */
- return boot_cpu_data.x86_clflush_size;
-}
-
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 51882ae..7104f2f 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -161,12 +161,6 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
-static inline int
-dma_get_cache_alignment(void)
-{
- return L1_CACHE_BYTES;
-}
-
#define dma_is_consistent(d, h) (1)
static inline void
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 89b7e1a..e0670a5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -142,6 +142,14 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+static inline int dma_get_cache_alignment(void)
+{
+#ifdef ARCH_DMA_MINALIGN
+ return ARCH_DMA_MINALIGN;
+#endif
+ return 1;
+}
+
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP 0x01
#define DMA_MEMORY_IO 0x02
--
1.6.5
From: FUJITA Tomonori <[email protected]>
Date: Thu, 8 Jul 2010 15:57:47 +0900
> dma_get_cache_alignment returns the minimum DMA
> alignment. Architectures define it as ARCH_DMA_MINALIGN (formerly
> ARCH_KMALLOC_MINALIGN). So we can unify the dma_get_cache_alignment
> implementations.
>
> Note that some architectures implement dma_get_cache_alignment
> incorrectly: dma_get_cache_alignment() should return the minimum DMA
> alignment, so fully coherent architectures should return 1. This patch
> also fixes that.
>
> Signed-off-by: FUJITA Tomonori <[email protected]>
Acked-by: David S. Miller <[email protected]>