parisc carries its own copy of L1_CACHE_ALIGN, identical in effect to the generic definition in include/linux/cache.h. Let's drop the private copy and use the standard L1_CACHE_ALIGN macro instead.
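For reference, a minimal user-space sketch of the round-up-to-cache-line arithmetic the macro performs. The 64-byte line size below is purely illustrative, not parisc's actual L1_CACHE_BYTES:

#include <stdio.h>

#define L1_CACHE_BYTES 64	/* illustrative value only */
#define L1_CACHE_ALIGN(x) (((x) + (L1_CACHE_BYTES - 1)) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	/* Rounds up to the next cache-line multiple: 1 -> 64, 64 -> 64, 65 -> 128 */
	printf("%lu %lu %lu\n",
	       L1_CACHE_ALIGN(1UL), L1_CACHE_ALIGN(64UL), L1_CACHE_ALIGN(65UL));
	return 0;
}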
Signed-off-by: FUJITA Tomonori <[email protected]>
---
arch/parisc/include/asm/cache.h | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 45effe6..a050f9f 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,8 +24,6 @@
 
 #ifndef __ASSEMBLY__
 
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
--
1.6.5