2019-11-27 18:46:26

by Pasha Tatashin

[permalink] [raw]
Subject: [PATCH 0/3] Use C inlines for uaccess

Changelog
v3:
- Added Acked-by from Stefano Stabellini
- Addressed comments from Mark Rutland
v2:
- Addressed Russell King's concern by not adding
uaccess_* to ARM.
- Removed the accidental change to xtensa

Convert the remaining uaccess_* calls from ASM macros to C inlines.

These patches apply against linux-next. I boot tested ARM64, and
compile tested the ARM change.
Pavel Tatashin (3):
arm/arm64/xen: use C inlines for privcmd_call
arm64: remove uaccess_ttbr0 asm macros from cache functions
arm64: remove the rest of asm-uaccess.h

arch/arm/include/asm/assembler.h | 2 +-
arch/arm/include/asm/xen/hypercall.h | 10 +++++
arch/arm/xen/enlighten.c | 2 +-
arch/arm/xen/hypercall.S | 4 +-
arch/arm64/include/asm/asm-uaccess.h | 61 --------------------------
arch/arm64/include/asm/cacheflush.h | 39 ++++++++++++++--
arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++
arch/arm64/kernel/entry.S | 27 +++++++++++-
arch/arm64/lib/clear_user.S | 2 +-
arch/arm64/lib/copy_from_user.S | 2 +-
arch/arm64/lib/copy_in_user.S | 2 +-
arch/arm64/lib/copy_to_user.S | 2 +-
arch/arm64/mm/cache.S | 42 ++++++------------
arch/arm64/mm/flush.c | 2 +-
arch/arm64/xen/hypercall.S | 19 +-------
include/xen/arm/hypercall.h | 12 ++---
16 files changed, 130 insertions(+), 126 deletions(-)
delete mode 100644 arch/arm64/include/asm/asm-uaccess.h

--
2.24.0


2019-11-27 18:46:32

by Pasha Tatashin

[permalink] [raw]
Subject: [PATCH 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions

We currently duplicate the logic to enable/disable uaccess via TTBR0,
with C functions and assembly macros. This is a maintenance burden
and is liable to lead to subtle bugs, so let's get rid of the assembly
macros, and always use the C functions. This requires refactoring
some assembly functions to have a C wrapper.

Signed-off-by: Pavel Tatashin <[email protected]>
---
arch/arm64/include/asm/asm-uaccess.h | 22 ---------------
arch/arm64/include/asm/cacheflush.h | 39 ++++++++++++++++++++++++--
arch/arm64/mm/cache.S | 41 +++++++++-------------------
arch/arm64/mm/flush.c | 2 +-
4 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f68a0e64482a..fba2a69f7fef 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -34,28 +34,6 @@
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
-
- .macro uaccess_ttbr0_disable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- __uaccess_ttbr0_disable \tmp1
- restore_irq \tmp2
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp3 // avoid preemption
- __uaccess_ttbr0_enable \tmp1, \tmp2
- restore_irq \tmp3
-alternative_else_nop_endif
- .endm
-#else
- .macro uaccess_ttbr0_disable, tmp1, tmp2
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
- .endm
#endif

#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..c0b265e12d9d 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -61,16 +61,49 @@
* - kaddr - page address
* - size - region size
*/
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int invalidate_icache_range(unsigned long start, unsigned long end);
+extern void __asm_flush_icache_range(unsigned long start, unsigned long end);
+extern long __asm_flush_cache_user_range(unsigned long start,
+ unsigned long end);
+extern int asm_invalidate_icache_range(unsigned long start,
+ unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

+static inline void __flush_icache_range(unsigned long start, unsigned long end)
+{
+ uaccess_ttbr0_enable();
+ __asm_flush_icache_range(start, end);
+ uaccess_ttbr0_disable();
+}
+
+static inline void __flush_cache_user_range(unsigned long start,
+ unsigned long end)
+{
+ uaccess_ttbr0_enable();
+ __asm_flush_cache_user_range(start, end);
+ uaccess_ttbr0_disable();
+}
+
+static inline int invalidate_icache_range(unsigned long start,
+ unsigned long end)
+{
+ int rv;
+
+ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
+ isb();
+ return 0;
+ }
+ uaccess_ttbr0_enable();
+ rv = asm_invalidate_icache_range(start, end);
+ uaccess_ttbr0_disable();
+
+ return rv;
+}
+
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
__flush_icache_range(start, end);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..a48b6dba304e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>

/*
- * flush_icache_range(start,end)
+ * __asm_flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -24,11 +24,11 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_icache_range)
+ENTRY(__asm_flush_icache_range)
/* FALLTHROUGH */

/*
- * __flush_cache_user_range(start,end)
+ * __asm_flush_cache_user_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,8 +37,7 @@ ENTRY(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_cache_user_range)
- uaccess_ttbr0_enable x2, x3, x4
+ENTRY(__asm_flush_cache_user_range)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
b 7f
@@ -60,41 +59,27 @@ alternative_if ARM64_HAS_CACHE_DIC
alternative_else_nop_endif
invalidate_icache_by_line x0, x1, x2, x3, 9f
8: mov x0, #0
-1:
- uaccess_ttbr0_disable x1, x2
- ret
-9:
- mov x0, #-EFAULT
+1: ret
+9: mov x0, #-EFAULT
b 1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+ENDPROC(__asm_flush_icache_range)
+ENDPROC(__asm_flush_cache_user_range)

/*
- * invalidate_icache_range(start,end)
+ * asm_invalidate_icache_range(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(invalidate_icache_range)
-alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
- isb
- ret
-alternative_else_nop_endif
-
- uaccess_ttbr0_enable x2, x3, x4
-
+ENTRY(asm_invalidate_icache_range)
invalidate_icache_by_line x0, x1, x2, x3, 2f
mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
- ret
-2:
- mov x0, #-EFAULT
+1: ret
+2: mov x0, #-EFAULT
b 1b
-ENDPROC(invalidate_icache_range)
+ENDPROC(asm_invalidate_icache_range)

/*
* __flush_dcache_area(kaddr, size)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..b23f34d23f31 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(__asm_flush_icache_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
--
2.24.0

2019-11-27 18:46:38

by Pasha Tatashin

[permalink] [raw]
Subject: [PATCH 3/3] arm64: remove the rest of asm-uaccess.h

The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable
are the last two macros defined in asm-uaccess.h.

For now move them to entry.S where they are used. Eventually,
these macros should be replaced with C wrappers to reduce the
maintenance burden.

Also, once these macros are unified with the C counterparts, it
is a good idea to check that PAN is in the correct state on every
enable/disable call.

Signed-off-by: Pavel Tatashin <[email protected]>
---
arch/arm64/include/asm/asm-uaccess.h | 39 ----------------------------
arch/arm64/kernel/entry.S | 27 ++++++++++++++++++-
arch/arm64/lib/clear_user.S | 2 +-
arch/arm64/lib/copy_from_user.S | 2 +-
arch/arm64/lib/copy_in_user.S | 2 +-
arch/arm64/lib/copy_to_user.S | 2 +-
arch/arm64/mm/cache.S | 1 -
7 files changed, 30 insertions(+), 45 deletions(-)
delete mode 100644 arch/arm64/include/asm/asm-uaccess.h

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
deleted file mode 100644
index fba2a69f7fef..000000000000
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ASM_UACCESS_H
-#define __ASM_ASM_UACCESS_H
-
-#include <asm/alternative.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/mmu.h>
-#include <asm/sysreg.h>
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- .macro __uaccess_ttbr0_disable, tmp1
- mrs \tmp1, ttbr1_el1 // swapper_pg_dir
- bic \tmp1, \tmp1, #TTBR_ASID_MASK
- sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
- msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
- isb
- add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
- msr ttbr1_el1, \tmp1 // set reserved ASID
- isb
- .endm
-
- .macro __uaccess_ttbr0_enable, tmp1, tmp2
- get_current_task \tmp1
- ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
- mrs \tmp2, ttbr1_el1
- extr \tmp2, \tmp2, \tmp1, #48
- ror \tmp2, \tmp2, #16
- msr ttbr1_el1, \tmp2 // set the active ASID
- isb
- msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
- isb
- .endm
-#endif
-
-#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 583f71abbe98..446d90ab31af 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -22,8 +22,8 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
+#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
@@ -143,6 +143,31 @@ alternative_cb_end
#endif
.endm

+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ bic \tmp1, \tmp1, #TTBR_ASID_MASK
+ sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+ msr ttbr1_el1, \tmp1 // set reserved ASID
+ isb
+ .endm
+
+ .macro __uaccess_ttbr0_enable, tmp1, tmp2
+ get_current_task \tmp1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ mrs \tmp2, ttbr1_el1
+ extr \tmp2, \tmp2, \tmp1, #48
+ ror \tmp2, \tmp2, #16
+ msr ttbr1_el1, \tmp2 // set the active ASID
+ isb
+ msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
+ isb
+ .endm
+#endif
+
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index aeafc03e961a..b0b4a86a09e2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -6,7 +6,7 @@
*/
#include <linux/linkage.h>

-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>

.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index ebb3c06cbb5d..142bc7505518 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -5,7 +5,7 @@

#include <linux/linkage.h>

-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cache.h>

diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 3d8153a1ebce..04dc48ca26f7 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -7,7 +7,7 @@

#include <linux/linkage.h>

-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cache.h>

diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 357eae2c18eb..8f3218ae88ab 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -5,7 +5,7 @@

#include <linux/linkage.h>

-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cache.h>

diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index a48b6dba304e..f7130c30d6e3 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -12,7 +12,6 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
-#include <asm/asm-uaccess.h>

/*
* __asm_flush_icache_range(start,end)
--
2.24.0

2019-11-27 18:46:44

by Pasha Tatashin

[permalink] [raw]
Subject: [PATCH 1/3] arm/arm64/xen: use C inlines for privcmd_call

privcmd_call requires to enable access to userspace for the
duration of the hypercall.

Currently, this is done via assembly macros. Change it to C
inlines instead.

Signed-off-by: Pavel Tatashin <[email protected]>
Acked-by: Stefano Stabellini <[email protected]>
---
arch/arm/include/asm/assembler.h | 2 +-
arch/arm/include/asm/xen/hypercall.h | 10 +++++++++
arch/arm/xen/enlighten.c | 2 +-
arch/arm/xen/hypercall.S | 4 ++--
arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++++++++++++++++
arch/arm64/xen/hypercall.S | 19 ++---------------
include/xen/arm/hypercall.h | 12 +++++------
7 files changed, 50 insertions(+), 27 deletions(-)

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 99929122dad7..8e9262a0f016 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -480,7 +480,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
- * Whenever we re-enter userspace, the domains should always be
+ * Whenever we re-enter kernel, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 3522cbaed316..cac5bd9ef519 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1 +1,11 @@
+#ifndef _ASM_ARM_XEN_HYPERCALL_H
+#define _ASM_ARM_XEN_HYPERCALL_H
#include <xen/arm/hypercall.h>
+
+static inline long privcmd_call(unsigned int call, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5)
+{
+ return arch_privcmd_call(call, a1, a2, a3, a4, a5);
+}
+#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index dd6804a64f1a..e87280c6d25d 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -440,4 +440,4 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
-EXPORT_SYMBOL_GPL(privcmd_call);
+EXPORT_SYMBOL_GPL(arch_privcmd_call);
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index b11bba542fac..277078c7da49 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -94,7 +94,7 @@ HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);

-ENTRY(privcmd_call)
+ENTRY(arch_privcmd_call)
stmdb sp!, {r4}
mov r12, r0
mov r0, r1
@@ -119,4 +119,4 @@ ENTRY(privcmd_call)

ldm sp!, {r4}
ret lr
-ENDPROC(privcmd_call);
+ENDPROC(arch_privcmd_call);
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 3522cbaed316..1a74fb28607f 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1,29 @@
+#ifndef _ASM_ARM64_XEN_HYPERCALL_H
+#define _ASM_ARM64_XEN_HYPERCALL_H
#include <xen/arm/hypercall.h>
+#include <linux/uaccess.h>
+
+static inline long privcmd_call(unsigned int call, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5)
+{
+ long rv;
+
+ /*
+ * Privcmd calls are issued by the userspace. The kernel needs to
+ * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+ * translations to user memory via AT instructions. Since AT
+ * instructions are not affected by the PAN bit (ARMv8.1), we only
+ * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+ * is enabled (it implies that hardware UAO and PAN disabled).
+ */
+ uaccess_ttbr0_enable();
+ rv = arch_privcmd_call(call, a1, a2, a3, a4, a5);
+ /*
+ * Disable userspace access from kernel once the hyp call completed.
+ */
+ uaccess_ttbr0_disable();
+
+ return rv;
+}
+#endif /* _ASM_ARM64_XEN_HYPERCALL_H */
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index c5f05c4a4d00..921611778d2a 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,7 +49,6 @@

#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/asm-uaccess.h>
#include <xen/interface/xen.h>


@@ -86,27 +85,13 @@ HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);

-ENTRY(privcmd_call)
+ENTRY(arch_privcmd_call)
mov x16, x0
mov x0, x1
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
- /*
- * Privcmd calls are issued by the userspace. The kernel needs to
- * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
- * translations to user memory via AT instructions. Since AT
- * instructions are not affected by the PAN bit (ARMv8.1), we only
- * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
- * is enabled (it implies that hardware UAO and PAN disabled).
- */
- uaccess_ttbr0_enable x6, x7, x8
hvc XEN_IMM
-
- /*
- * Disable userspace access from kernel once the hyp call completed.
- */
- uaccess_ttbr0_disable x6, x7
ret
-ENDPROC(privcmd_call);
+ENDPROC(arch_privcmd_call);
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index b40485e54d80..624c8ad7e42a 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -30,8 +30,8 @@
* IN THE SOFTWARE.
*/

-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
+#ifndef _ARM_XEN_HYPERCALL_H
+#define _ARM_XEN_HYPERCALL_H

#include <linux/bug.h>

@@ -41,9 +41,9 @@

struct xen_dm_op_buf;

-long privcmd_call(unsigned call, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5);
+long arch_privcmd_call(unsigned int call, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5);
int HYPERVISOR_xen_version(int cmd, void *arg);
int HYPERVISOR_console_io(int cmd, int count, char *str);
int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
@@ -88,4 +88,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
BUG();
}

-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#endif /* _ARM_XEN_HYPERCALL_H */
--
2.24.0

2019-11-27 18:48:22

by Pasha Tatashin

[permalink] [raw]
Subject: Re: [PATCH 0/3] Use C inlines for uaccess

Sorry, forgot to set the subject prefix correctly. It should be: [PATCH v3 0/3].

On Wed, Nov 27, 2019 at 1:44 PM Pavel Tatashin
<[email protected]> wrote:
>
> Changelog
> v3:
> - Added Acked-by from Stefano Stabellini
> - Addressed comments from Mark Rutland
> v2:
> - Addressed Russell King's concern by not adding
> uaccess_* to ARM.
> - Removed the accidental change to xtensa
>
> Convert the remaining uaccess_* calls from ASM macros to C inlines.
>
> These patches apply against linux-next. I boot tested ARM64, and
> compile tested the ARM change.
> Pavel Tatashin (3):
> arm/arm64/xen: use C inlines for privcmd_call
> arm64: remove uaccess_ttbr0 asm macros from cache functions
> arm64: remove the rest of asm-uaccess.h
>
> arch/arm/include/asm/assembler.h | 2 +-
> arch/arm/include/asm/xen/hypercall.h | 10 +++++
> arch/arm/xen/enlighten.c | 2 +-
> arch/arm/xen/hypercall.S | 4 +-
> arch/arm64/include/asm/asm-uaccess.h | 61 --------------------------
> arch/arm64/include/asm/cacheflush.h | 39 ++++++++++++++--
> arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++
> arch/arm64/kernel/entry.S | 27 +++++++++++-
> arch/arm64/lib/clear_user.S | 2 +-
> arch/arm64/lib/copy_from_user.S | 2 +-
> arch/arm64/lib/copy_in_user.S | 2 +-
> arch/arm64/lib/copy_to_user.S | 2 +-
> arch/arm64/mm/cache.S | 42 ++++++------------
> arch/arm64/mm/flush.c | 2 +-
> arch/arm64/xen/hypercall.S | 19 +-------
> include/xen/arm/hypercall.h | 12 ++---
> 16 files changed, 130 insertions(+), 126 deletions(-)
> delete mode 100644 arch/arm64/include/asm/asm-uaccess.h
>
> --
> 2.24.0
>

2019-11-28 14:54:16

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions

On Wed, Nov 27, 2019 at 01:44:52PM -0500, Pavel Tatashin wrote:
> We currently duplicate the logic to enable/disable uaccess via TTBR0,
> with C functions and assembly macros. This is a maintenance burden
> macros, and always use the C functions. This requires refactoring
> some assembly functions to have a C wrapper.

[...]

> +static inline int invalidate_icache_range(unsigned long start,
> + unsigned long end)
> +{
> + int rv;

Please make this 'ret', for consistency with other arm64 code. We only
use 'rv' in one place where it's short for "Revision and Variant", and
'ret' is our usual name for a temporary variable used to hold a return
value.

> +
> + if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
> + isb();
> + return 0;
> + }
> + uaccess_ttbr0_enable();

Please place a newline between these two, for consistency with other
arm64 code.

> + rv = asm_invalidate_icache_range(start, end);
> + uaccess_ttbr0_disable();
> +
> + return rv;
> +}
> +
> static inline void flush_icache_range(unsigned long start, unsigned long end)
> {
> __flush_icache_range(start, end);
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index db767b072601..a48b6dba304e 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -15,7 +15,7 @@
> #include <asm/asm-uaccess.h>
>
> /*
> - * flush_icache_range(start,end)
> + * __asm_flush_icache_range(start,end)
> *
> * Ensure that the I and D caches are coherent within specified region.
> * This is typically used when code has been written to a memory region,
> @@ -24,11 +24,11 @@
> * - start - virtual start address of region
> * - end - virtual end address of region
> */
> -ENTRY(__flush_icache_range)
> +ENTRY(__asm_flush_icache_range)
> /* FALLTHROUGH */
>
> /*
> - * __flush_cache_user_range(start,end)
> + * __asm_flush_cache_user_range(start,end)
> *
> * Ensure that the I and D caches are coherent within specified region.
> * This is typically used when code has been written to a memory region,
> @@ -37,8 +37,7 @@ ENTRY(__flush_icache_range)
> * - start - virtual start address of region
> * - end - virtual end address of region
> */
> -ENTRY(__flush_cache_user_range)
> - uaccess_ttbr0_enable x2, x3, x4
> +ENTRY(__asm_flush_cache_user_range)
> alternative_if ARM64_HAS_CACHE_IDC

It's unfortunate that we pulled the IDC alternative out for
invalidate_icache_range(), but not here.

If we want to pull out that, then I reckon what we might want to do is
have two asm primitives:

* __asm_clean_dcache_range
* __asm_invalidate_icache_range

... which just do the by_line op, with a fixup that can return -EFAULT.

Then we can give each a C wrapper that just does the IDC/DIC check, e.g.

static int __clean_dcache_range(unsigned long start, unsigned long end)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_IDC)) {
dsb(ishst);
return 0;
}

return __asm_clean_dcache_range(start, end);
}

Then we can build all the more complicated variants atop of those, e.g.

static int __flush_cache_user_range(unsigned long start, unsigned long end)
{
int ret;

uaccess_ttbr0_enable();

ret = __clean_dcache_range(start, end);
if (ret)
goto out;

ret = __invalidate_icache_range(start, end);

out:
uaccess_ttbr0_disable();
return ret;
}

Thanks,
Mark.

2019-11-29 15:11:19

by Julien Grall

[permalink] [raw]
Subject: Re: [PATCH 1/3] arm/arm64/xen: use C inlines for privcmd_call

Hi,

On 27/11/2019 18:44, Pavel Tatashin wrote:
> diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
> index 3522cbaed316..1a74fb28607f 100644
> --- a/arch/arm64/include/asm/xen/hypercall.h
> +++ b/arch/arm64/include/asm/xen/hypercall.h
> @@ -1 +1,29 @@
> +#ifndef _ASM_ARM64_XEN_HYPERCALL_H
> +#define _ASM_ARM64_XEN_HYPERCALL_H
> #include <xen/arm/hypercall.h>
> +#include <linux/uaccess.h>
> +
> +static inline long privcmd_call(unsigned int call, unsigned long a1,
> + unsigned long a2, unsigned long a3,
> + unsigned long a4, unsigned long a5)

I realize that privcmd_call is the only hypercall using Software PAN at
the moment. However, dm_op needs the same as hypercall will be issued
from userspace as well.

So I was wondering whether we should create a generic function (e.g.
do_xen_hypercall() or do_xen_user_hypercall()) to cover the two hypercalls?

> diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
> index b40485e54d80..624c8ad7e42a 100644
> --- a/include/xen/arm/hypercall.h
> +++ b/include/xen/arm/hypercall.h
> @@ -30,8 +30,8 @@
> * IN THE SOFTWARE.
> */
>
> -#ifndef _ASM_ARM_XEN_HYPERCALL_H
> -#define _ASM_ARM_XEN_HYPERCALL_H
> +#ifndef _ARM_XEN_HYPERCALL_H
> +#define _ARM_XEN_HYPERCALL_H

This change feels a bit out of context. Could you split it in a separate
patch?

>
> #include <linux/bug.h>
>
> @@ -41,9 +41,9 @@
>
> struct xen_dm_op_buf;
>
> -long privcmd_call(unsigned call, unsigned long a1,
> - unsigned long a2, unsigned long a3,
> - unsigned long a4, unsigned long a5);
> +long arch_privcmd_call(unsigned int call, unsigned long a1,
> + unsigned long a2, unsigned long a3,
> + unsigned long a4, unsigned long a5);
> int HYPERVISOR_xen_version(int cmd, void *arg);
> int HYPERVISOR_console_io(int cmd, int count, char *str);
> int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
> @@ -88,4 +88,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
> BUG();
> }
>
> -#endif /* _ASM_ARM_XEN_HYPERCALL_H */
> +#endif /* _ARM_XEN_HYPERCALL_H */
>

Cheers,

--
Julien Grall

2019-11-29 15:19:11

by Andrew Cooper

[permalink] [raw]
Subject: Re: [Xen-devel] [PATCH 1/3] arm/arm64/xen: use C inlines for privcmd_call

On 29/11/2019 15:05, Julien Grall wrote:
> Hi,
>
> On 27/11/2019 18:44, Pavel Tatashin wrote:
>> diff --git a/arch/arm64/include/asm/xen/hypercall.h
>> b/arch/arm64/include/asm/xen/hypercall.h
>> index 3522cbaed316..1a74fb28607f 100644
>> --- a/arch/arm64/include/asm/xen/hypercall.h
>> +++ b/arch/arm64/include/asm/xen/hypercall.h
>> @@ -1 +1,29 @@
>> +#ifndef _ASM_ARM64_XEN_HYPERCALL_H
>> +#define _ASM_ARM64_XEN_HYPERCALL_H
>>   #include <xen/arm/hypercall.h>
>> +#include <linux/uaccess.h>
>> +
>> +static inline long privcmd_call(unsigned int call, unsigned long a1,
>> +                unsigned long a2, unsigned long a3,
>> +                unsigned long a4, unsigned long a5)
>
> I realize that privcmd_call is the only hypercall using Software PAN
> at the moment. However, dm_op needs the same as hypercall will be
> issued from userspace as well.

And dm_op() won't be the only example as we continue in cleaning up the
gaping hole that is privcmd.

> So I was wondering whether we should create a generic function (e.g.
> do_xen_hypercall() or do_xen_user_hypercall()) to cover the two
> hypercalls?

Probably a good idea.

~Andrew

2019-12-04 17:57:16

by Pasha Tatashin

[permalink] [raw]
Subject: Re: [Xen-devel] [PATCH 1/3] arm/arm64/xen: use C inlines for privcmd_call

On Fri, Nov 29, 2019 at 10:10 AM Andrew Cooper
<[email protected]> wrote:
>
> On 29/11/2019 15:05, Julien Grall wrote:
> > Hi,
> >
> > On 27/11/2019 18:44, Pavel Tatashin wrote:
> >> diff --git a/arch/arm64/include/asm/xen/hypercall.h
> >> b/arch/arm64/include/asm/xen/hypercall.h
> >> index 3522cbaed316..1a74fb28607f 100644
> >> --- a/arch/arm64/include/asm/xen/hypercall.h
> >> +++ b/arch/arm64/include/asm/xen/hypercall.h
> >> @@ -1 +1,29 @@
> >> +#ifndef _ASM_ARM64_XEN_HYPERCALL_H
> >> +#define _ASM_ARM64_XEN_HYPERCALL_H
> >> #include <xen/arm/hypercall.h>
> >> +#include <linux/uaccess.h>
> >> +
> >> +static inline long privcmd_call(unsigned int call, unsigned long a1,
> >> + unsigned long a2, unsigned long a3,
> >> + unsigned long a4, unsigned long a5)
> >
> > I realize that privcmd_call is the only hypercall using Software PAN
> > at the moment. However, dm_op needs the same as hypercall will be
> > issued from userspace as well.
>
> And dm_op() won't be the only example as we continue in cleaning up the
> gaping hole that is privcmd.
>
> > So I was wondering whether we should create a generic function (e.g.
> > do_xen_hypercall() or do_xen_user_hypercall()) to cover the two
> > hypercalls?
>
> Probably a good idea.

It sounds good to me, but let's keep it outside of this series.

Thank you,
Pasha

2019-12-04 17:59:43

by Pasha Tatashin

[permalink] [raw]
Subject: Re: [PATCH 1/3] arm/arm64/xen: use C inlines for privcmd_call

On Fri, Nov 29, 2019 at 10:05 AM Julien Grall <[email protected]> wrote:
>
> Hi,
>
> On 27/11/2019 18:44, Pavel Tatashin wrote:
> > diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
> > index 3522cbaed316..1a74fb28607f 100644
> > --- a/arch/arm64/include/asm/xen/hypercall.h
> > +++ b/arch/arm64/include/asm/xen/hypercall.h
> > @@ -1 +1,29 @@
> > +#ifndef _ASM_ARM64_XEN_HYPERCALL_H
> > +#define _ASM_ARM64_XEN_HYPERCALL_H
> > #include <xen/arm/hypercall.h>
> > +#include <linux/uaccess.h>
> > +
> > +static inline long privcmd_call(unsigned int call, unsigned long a1,
> > + unsigned long a2, unsigned long a3,
> > + unsigned long a4, unsigned long a5)
>
> I realize that privcmd_call is the only hypercall using Software PAN at
> the moment. However, dm_op needs the same as hypercall will be issued
> from userspace as well.

The clean-up I am working on now is specific to moving the current PAN
usage to C wrappers. Once dm_op requires PAN, it will need to use
the C variants, because the ASM versions are going to be removed by
this series.

>
> So I was wondering whether we should create a generic function (e.g.
> do_xen_hypercall() or do_xen_user_hypercall()) to cover the two hypercalls?
>
> > diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
> > index b40485e54d80..624c8ad7e42a 100644
> > --- a/include/xen/arm/hypercall.h
> > +++ b/include/xen/arm/hypercall.h
> > @@ -30,8 +30,8 @@
> > * IN THE SOFTWARE.
> > */
> >
> > -#ifndef _ASM_ARM_XEN_HYPERCALL_H
> > -#define _ASM_ARM_XEN_HYPERCALL_H
> > +#ifndef _ARM_XEN_HYPERCALL_H
> > +#define _ARM_XEN_HYPERCALL_H
>
> This change feels a bit out of context. Could you split it in a separate
> patch?

Makes sense, I am splitting this into a separate patch.

Thank you,
Pasha

2019-12-04 20:52:33

by Pasha Tatashin

[permalink] [raw]
Subject: Re: [PATCH 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions

On Thu, Nov 28, 2019 at 9:51 AM Mark Rutland <[email protected]> wrote:
>
> On Wed, Nov 27, 2019 at 01:44:52PM -0500, Pavel Tatashin wrote:
> > We currently duplicate the logic to enable/disable uaccess via TTBR0,
> > with C functions and assembly macros. This is a maintenance burden
> > and is liable to lead to subtle bugs, so let's get rid of the assembly
> > macros, and always use the C functions. This requires refactoring
> > some assembly functions to have a C wrapper.
>
> [...]
>
> > +static inline int invalidate_icache_range(unsigned long start,
> > + unsigned long end)
> > +{
> > + int rv;
>
> Please make this 'ret', for consistency with other arm64 code. We only
> use 'rv' in one place where it's short for "Revision and Variant", and
> 'ret' is our usual name for a temporary variable used to hold a return
> value.

OK

>
> > +
> > + if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
> > + isb();
> > + return 0;
> > + }
> > + uaccess_ttbr0_enable();
>
> Please place a newline between these two, for consistency with other
> arm64 code.

OK

>
> > + rv = asm_invalidate_icache_range(start, end);
> > + uaccess_ttbr0_disable();
> > +
> > + return rv;
> > +}
> > +
> > static inline void flush_icache_range(unsigned long start, unsigned long end)
> > {
> > __flush_icache_range(start, end);
> > diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> > index db767b072601..a48b6dba304e 100644
> > --- a/arch/arm64/mm/cache.S
> > +++ b/arch/arm64/mm/cache.S
> > @@ -15,7 +15,7 @@
> > #include <asm/asm-uaccess.h>
> >
> > /*
> > - * flush_icache_range(start,end)
> > + * __asm_flush_icache_range(start,end)
> > *
> > * Ensure that the I and D caches are coherent within specified region.
> > * This is typically used when code has been written to a memory region,
> > @@ -24,11 +24,11 @@
> > * - start - virtual start address of region
> > * - end - virtual end address of region
> > */
> > -ENTRY(__flush_icache_range)
> > +ENTRY(__asm_flush_icache_range)
> > /* FALLTHROUGH */
> >
> > /*
> > - * __flush_cache_user_range(start,end)
> > + * __asm_flush_cache_user_range(start,end)
> > *
> > * Ensure that the I and D caches are coherent within specified region.
> > * This is typically used when code has been written to a memory region,
> > @@ -37,8 +37,7 @@ ENTRY(__flush_icache_range)
> > * - start - virtual start address of region
> > * - end - virtual end address of region
> > */
> > -ENTRY(__flush_cache_user_range)
> > - uaccess_ttbr0_enable x2, x3, x4
> > +ENTRY(__asm_flush_cache_user_range)
> > alternative_if ARM64_HAS_CACHE_IDC
>
> It's unfortunate that we pulled the IDC alternative out for
> invalidate_icache_range(), but not here.

Good point. I will fix that in a separate patch.

Thank you,
Pasha