2019-12-09 22:31:04

by Nick Desaulniers

Subject: [PATCH 0/2] Hexagon fixes

Fixes two warnings when building hexagon with Clang:
$ ARCH=hexagon CROSS_COMPILE=hexagon-linux-gnu- make -j71 \
CC=clang AS=clang LD=ld.lld AR=llvm-ar

Fixes -Winline-asm and -Wimplicit-function-declaration warnings.

Nick Desaulniers (2):
hexagon: define ioremap_uc
hexagon: parenthesize registers in asm predicates

arch/hexagon/include/asm/atomic.h   |  8 ++++----
arch/hexagon/include/asm/bitops.h   |  8 ++++----
arch/hexagon/include/asm/cmpxchg.h  |  2 +-
arch/hexagon/include/asm/futex.h    |  6 +++---
arch/hexagon/include/asm/io.h       |  1 +
arch/hexagon/include/asm/spinlock.h | 20 ++++++++++----------
arch/hexagon/kernel/vm_entry.S      |  2 +-
7 files changed, 24 insertions(+), 23 deletions(-)

--
2.24.0.393.g34dc348eaf-goog


2019-12-09 22:31:35

by Nick Desaulniers

Subject: [PATCH 2/2] hexagon: parenthesize registers in asm predicates

Hexagon requires that register predicates in assembly be parenthesized.
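
As a minimal illustration of the now-required spelling, here is a
hypothetical inline-asm helper modeled on the ffs() hunk below (the
function itself is made up for illustration and is not part of this
diff):

	static inline int is_zero(int x)
	{
		int r;

		asm("{ P0 = cmp.eq(%1,#0); }\n"
		    "{ if (P0) %0 = #1; if (!P0) %0 = #0; }\n"
		    : "=&r" (r)
		    : "r" (x)
		    : "p0");
		return r;
	}

The unparenthesized forms removed below ("if !P3 jump 1b") are what
trigger -Winline-asm in the Clang build described in the cover letter.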

Link: https://github.com/ClangBuiltLinux/linux/issues/754
Suggested-by: Sid Manning <[email protected]>
Signed-off-by: Nick Desaulniers <[email protected]>
---
arch/hexagon/include/asm/atomic.h   |  8 ++++----
arch/hexagon/include/asm/bitops.h   |  8 ++++----
arch/hexagon/include/asm/cmpxchg.h  |  2 +-
arch/hexagon/include/asm/futex.h    |  6 +++---
arch/hexagon/include/asm/spinlock.h | 20 ++++++++++----------
arch/hexagon/kernel/vm_entry.S      |  2 +-
6 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 12cd9231c4b8..0231d69c8bf2 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%2);\n" \
" %1 = "#op "(%0,%3);\n" \
" memw_locked(%2,P3)=%1;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output), "=&r" (val) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
" }"
" memw_locked(%2, p3) = %1;"
" {"
- " if !p3 jump 1b;"
+ " if (!p3) jump 1b;"
" }"
"2:"
: "=&r" (__oldval), "=&r" (tmp)
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 47384b094b94..71429f756af0 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
int r;

asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
- "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
: "=&r" (r)
: "r" (x)
: "p0");
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 6091322c3af9..92b8a02e588a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n" /* load into retval */
" memw_locked(%1,P0) = %2;\n" /* store into memory */
- " if !P0 jump 1b;\n"
+ " if (!P0) jump 1b;\n"
: "=&r" (retval)
: "r" (ptr), "r" (x)
: "memory", "p0"
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index cb635216a732..0191f7c7193e 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -16,7 +16,7 @@
/* For example: %1 = %4 */ \
insn \
"2: memw_locked(%3,p2) = %1;\n" \
- " if !p2 jump 1b;\n" \
+ " if (!p2) jump 1b;\n" \
" %1 = #0;\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
"1: %1 = memw_locked(%3)\n"
" {\n"
" p2 = cmp.eq(%1,%4)\n"
- " if !p2.new jump:NT 3f\n"
+ " if (!p2.new) jump:NT 3f\n"
" }\n"
"2: memw_locked(%3,p2) = %5\n"
- " if !p2 jump 1b\n"
+ " if (!p2) jump 1b\n"
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index bfe07d842ff3..ef103b73bec8 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
"1: R6 = memw_locked(%0);\n"
" R6 = add(R6,#-1);\n"
" memw_locked(%0,P3) = R6\n"
- " if !P3 jump 1b;\n"
+ " if (!P3) jump 1b;\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" { %0 = P3 }\n"
"1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0)\n"
" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1)\n"
" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1b; R6 = #1; }\n"
+ " { if (!P3) jump 1b; R6 = #1; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 65a1ea0eed2f..554371d92bed 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -369,7 +369,7 @@ ret_from_fork:
R26.L = #LO(do_work_pending);
R0 = #VM_INT_DISABLE;
}
- if P0 jump check_work_pending
+ if (P0) jump check_work_pending
{
R0 = R25;
callr R24
--
2.24.0.393.g34dc348eaf-goog

2019-12-09 22:32:16

by Nick Desaulniers

Subject: [PATCH 1/2] hexagon: define ioremap_uc

Similar to commit 38e45d81d14e ("sparc64: implement ioremap_uc"),
define ioremap_uc for hexagon to avoid errors from
-Wimplicit-function-declaration.
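
For reference, the diagnostic fires at any call site of ioremap_uc();
a hypothetical caller (a sketch only; the real trigger is the devres
helper named in the Fixes: tag below) now compiles cleanly:

	#include <linux/io.h>

	/* hypothetical example; ioremap_uc() expands to ioremap() here */
	static void __iomem *map_uncached(unsigned long phys, unsigned long size)
	{
		return ioremap_uc(phys, size);
	}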

Fixes: e537654b7039 ("lib: devres: add a helper function for ioremap_uc")
Link: https://github.com/ClangBuiltLinux/linux/issues/797
Suggested-by: Nathan Chancellor <[email protected]>
Signed-off-by: Nick Desaulniers <[email protected]>
---
arch/hexagon/include/asm/io.h | 1 +
1 file changed, 1 insertion(+)

diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 539e3efcf39c..b0dbc3473172 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)

void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))


#define __raw_writel writel
--
2.24.0.393.g34dc348eaf-goog

2019-12-10 01:06:16

by Brian Cain

Subject: RE: [PATCH 2/2] hexagon: parenthesize registers in asm predicates

> -----Original Message-----
> From: Nick Desaulniers <[email protected]>
> Sent: Monday, December 9, 2019 4:30 PM
> To: [email protected]
> Cc: Nick Desaulniers <[email protected]>; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; linux-
> [email protected]; Sid Manning <[email protected]>
> Subject: [PATCH 2/2] hexagon: parenthesize registers in asm predicates
>
> Hexagon requires that register predicates in assembly be parenthesized.
>
> Link: https://github.com/ClangBuiltLinux/linux/issues/754
> Suggested-by: Sid Manning <[email protected]>
> Signed-off-by: Nick Desaulniers <[email protected]>
> ---
> arch/hexagon/include/asm/atomic.h | 8 ++++----
> arch/hexagon/include/asm/bitops.h | 8 ++++----
> arch/hexagon/include/asm/cmpxchg.h | 2 +-
> arch/hexagon/include/asm/futex.h | 6 +++---
> arch/hexagon/include/asm/spinlock.h | 20 ++++++++++----------
> arch/hexagon/kernel/vm_entry.S | 2 +-
> 6 files changed, 23 insertions(+), 23 deletions(-)
>

Acked-by: Brian Cain <[email protected]>


2019-12-10 01:06:22

by Brian Cain

Subject: RE: [PATCH 1/2] hexagon: define ioremap_uc

> -----Original Message-----
> From: [email protected] <linux-hexagon-
> [email protected]> On Behalf Of Nick Desaulniers
> Sent: Monday, December 9, 2019 4:30 PM
> To: [email protected]
> Cc: Nick Desaulniers <[email protected]>; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; linux-
> [email protected]; Nathan Chancellor <[email protected]>
> Subject: [PATCH 1/2] hexagon: define ioremap_uc
>
> Similar to
> commit 38e45d81d14e ("sparc64: implement ioremap_uc") define
> ioremap_uc for hexagon to avoid errors from -Wimplicit-function-declaration.
>
> Fixes: e537654b7039 ("lib: devres: add a helper function for ioremap_uc")
> Link: https://github.com/ClangBuiltLinux/linux/issues/797
> Suggested-by: Nathan Chancellor <[email protected]>
> Signed-off-by: Nick Desaulniers <[email protected]>
> ---
> arch/hexagon/include/asm/io.h | 1 +
> 1 file changed, 1 insertion(+)
>

Acked-by: Brian Cain <[email protected]>


2019-12-10 08:11:00

by Geert Uytterhoeven

Subject: Re: [PATCH 1/2] hexagon: define ioremap_uc

On Mon, Dec 9, 2019 at 11:30 PM Nick Desaulniers
<[email protected]> wrote:
> Similar to
> commit 38e45d81d14e ("sparc64: implement ioremap_uc")
> define ioremap_uc for hexagon to avoid errors from
> -Wimplicit-function-declaration.
>
> Fixes: e537654b7039 ("lib: devres: add a helper function for ioremap_uc")
> Link: https://github.com/ClangBuiltLinux/linux/issues/797
> Suggested-by: Nathan Chancellor <[email protected]>
> Signed-off-by: Nick Desaulniers <[email protected]>
> ---
> arch/hexagon/include/asm/io.h | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
> index 539e3efcf39c..b0dbc3473172 100644
> --- a/arch/hexagon/include/asm/io.h
> +++ b/arch/hexagon/include/asm/io.h
> @@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)
>
> void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
> #define ioremap_nocache ioremap
> +#define ioremap_uc(X, Y) ioremap((X), (Y))

Do we really need this? There is only one user of ioremap_uc(), which
Christoph is trying hard to get rid of, and the new devres helper that
triggers all of this :-(
https://lore.kernel.org/dri-devel/[email protected]/

Gr{oetje,eeting}s,

Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds