Add byteshift arch overrides so that switching to the new common
header causes no functional change. At a later time this arch should
be able to use the common header directly, but that has not been
proven yet.
Signed-off-by: Harvey Harrison <[email protected]>
---
Russell, while this looks like a lot of churn, it is functionally
equivalent to the existing situation. It essentially open-codes
[lb]e_byteshift.h as arch overrides; each helper #defines itself so
the common header can detect the override and not emit its generic
version. A future patch will move over to the generic packed-struct
version, but I didn't want to introduce behavior changes while doing
the code consolidation.
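For reference, the packed-struct version mentioned above would look
roughly like this (a sketch only: the struct and helper names are
illustrative, not necessarily what the common header ends up using):

struct __una_u16 { u16 x; } __attribute__((packed));

static inline u16 __load_cpu16_noalign(const u8 *p)
{
	/* the packed struct tells the compiler the access may be
	 * unaligned, so it emits byteshifts on machines that cannot
	 * do unaligned loads and a plain load on machines that can */
	return ((const struct __una_u16 *)p)->x;
}

and similarly for the 32/64-bit loads and the stores. On CPUs that
handle unaligned accesses this collapses to a single load, which is
the eventual win; the byteshift overrides below simply preserve the
current code generation until the conversion is proven safe.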
arch/arm/include/asm/unaligned.h | 81 ++++++++++++++++++++++++++++++++------
1 files changed, 69 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
index 44593a8..1510c8b 100644
--- a/arch/arm/include/asm/unaligned.h
+++ b/arch/arm/include/asm/unaligned.h
@@ -1,19 +1,76 @@
#ifndef _ASM_ARM_UNALIGNED_H
#define _ASM_ARM_UNALIGNED_H
 
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-/*
- * Select endianness
- */
-#ifndef __ARMEB__
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 __arch_load_cpu16_noalign(const u8 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return p[0] | p[1] << 8;
+#else
+ return p[0] << 8 | p[1];
+#endif
+}
+#define __arch_load_cpu16_noalign __arch_load_cpu16_noalign
+
+static inline u32 __arch_load_cpu32_noalign(const u8 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+#else
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+#endif
+}
+#define __arch_load_cpu32_noalign __arch_load_cpu32_noalign
+
+static inline u64 __arch_load_cpu64_noalign(const u8 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return ((u64)__arch_load_cpu32_noalign(p + 4) << 32) |
+ __arch_load_cpu32_noalign(p);
+#else
+ return ((u64)__arch_load_cpu32_noalign(p) << 32) |
+ __arch_load_cpu32_noalign(p + 4);
+#endif
+}
+#define __arch_load_cpu64_noalign __arch_load_cpu64_noalign
+
+static inline void __arch_store_cpu16_noalign(u8 *p, u16 val)
+{
+#ifdef __LITTLE_ENDIAN
+ *p++ = val;
+ *p++ = val >> 8;
+#else
+ *p++ = val >> 8;
+ *p++ = val;
+#endif
+}
+#define __arch_store_cpu16_noalign __arch_store_cpu16_noalign
+
+static inline void __arch_store_cpu32_noalign(u8 *p, u32 val)
+{
+#ifdef __LITTLE_ENDIAN
+ __arch_store_cpu16_noalign(p, val);
+ __arch_store_cpu16_noalign(p + 2, val >> 16);
+#else
+ __arch_store_cpu16_noalign(p, val >> 16);
+ __arch_store_cpu16_noalign(p + 2, val);
+#endif
+}
+#define __arch_store_cpu32_noalign __arch_store_cpu32_noalign
+
+static inline void __arch_store_cpu64_noalign(u8 *p, u64 val)
+{
+#ifdef __LITTLE_ENDIAN
+ __arch_store_cpu32_noalign(p, val);
+ __arch_store_cpu32_noalign(p + 4, val >> 32);
#else
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
+ __arch_store_cpu32_noalign(p, val >> 32);
+ __arch_store_cpu32_noalign(p + 4, val);
#endif
+}
+#define __arch_store_cpu64_noalign __arch_store_cpu64_noalign
+#include <linux/unaligned.h>
 
#endif /* _ASM_ARM_UNALIGNED_H */
--
1.6.1.212.g4b3ec