2019-09-13 13:02:28

by Michal Suchánek

Subject: [PATCH v9 0/8] Disable compat cruft on ppc64le

Less code means fewer bugs, so add a knob to skip the compat stuff.
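
The knob in patch 7/8 roughly takes the shape below. This is a sketch of
the intended Kconfig change, not the exact hunk (COMPAT is currently a
def_bool tied to PPC64; its select lines are unchanged and omitted here):

  config COMPAT
          bool "Enable support for 32bit binaries"
          depends on PPC64
          default y if !CPU_LITTLE_ENDIAN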

This is tested on ppc64le on top of

https://patchwork.ozlabs.org/patch/1153850/
https://patchwork.ozlabs.org/patch/1158412/

Changes in v2: saner CONFIG_COMPAT ifdefs
Changes in v3:
- change llseek to 32bit instead of building it unconditionally in fs
- clean up the makefile conditionals
- remove some ifdefs or convert to IS_ENABLED where possible
Changes in v4:
- cleanup is_32bit_task and current_is_64bit
- more makefile cleanup
Changes in v5:
- more current_is_64bit cleanup
- split off callchain.c 32bit and 64bit parts
Changes in v6:
- cleanup makefile after split
- consolidate read_user_stack_32
- fix some checkpatch warnings
Changes in v7:
- add back __ARCH_WANT_SYS_LLSEEK to fix the build with llseek (see the
  sketch after this list)
- remove leftover hunk
- add review tags
Changes in v8:
- consolidate valid_user_sp to fix it in the split callchain.c
- fix build errors/warnings with PPC64 !COMPAT and PPC32
Changes in v9:
- remove current_is_64bit()
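
To expand on the v7 llseek note above: fs/read_write.c builds sys_llseek
only for 32-bit or COMPAT kernels, so a 64-bit kernel without COMPAT has
to opt back in explicitly. A condensed sketch of the mechanism patch 1/8
restores (not the exact hunks):

  /* arch/powerpc/include/asm/unistd.h: 64-bit powerpc still provides
   * the llseek syscall, so opt in even when COMPAT is disabled. */
  #define __ARCH_WANT_SYS_LLSEEK

  /* fs/read_write.c: build the syscall when any of these hold. */
  #if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
          defined(__ARCH_WANT_SYS_LLSEEK)
  SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                  unsigned long, offset_low, loff_t __user *, result,
                  unsigned int, whence)
  { /* ... */ }
  #endif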

Michal Suchanek (8):
powerpc: Add back __ARCH_WANT_SYS_LLSEEK macro
powerpc: move common register copy functions from signal_32.c to
signal.c
powerpc/perf: consolidate read_user_stack_32
powerpc/perf: consolidate valid_user_sp
powerpc/perf: remove current_is_64bit()
powerpc/64: make buildable without CONFIG_COMPAT
powerpc/64: Make COMPAT user-selectable disabled on littleendian by
default.
powerpc/perf: split callchain.c by bitness
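
Patches 5 and 6 hinge on is_32bit_task() staying meaningful on every
config, so callers no longer need current_is_64bit() or CONFIG_COMPAT
ifdefs. Roughly the end state in asm/thread_info.h (a sketch; see the
patches for the exact definition):

  #ifdef CONFIG_COMPAT
  #define is_32bit_task()	(test_thread_flag(TIF_32BIT))
  #else
  #define is_32bit_task()	(IS_ENABLED(CONFIG_PPC32))
  #endif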

arch/powerpc/Kconfig | 5 +-
arch/powerpc/include/asm/thread_info.h | 4 +-
arch/powerpc/include/asm/unistd.h | 1 +
arch/powerpc/kernel/Makefile | 7 +-
arch/powerpc/kernel/entry_64.S | 2 +
arch/powerpc/kernel/signal.c | 144 ++++++++-
arch/powerpc/kernel/signal_32.c | 140 ---------
arch/powerpc/kernel/syscall_64.c | 6 +-
arch/powerpc/kernel/vdso.c | 3 +-
arch/powerpc/perf/Makefile | 5 +-
arch/powerpc/perf/callchain.c | 387 +------------------------
arch/powerpc/perf/callchain.h | 25 ++
arch/powerpc/perf/callchain_32.c | 197 +++++++++++++
arch/powerpc/perf/callchain_64.c | 178 ++++++++++++
fs/read_write.c | 3 +-
15 files changed, 565 insertions(+), 542 deletions(-)
create mode 100644 arch/powerpc/perf/callchain.h
create mode 100644 arch/powerpc/perf/callchain_32.c
create mode 100644 arch/powerpc/perf/callchain_64.c

--
2.23.0


2019-09-13 14:38:21

by Michal Suchánek

Subject: [PATCH v9 3/8] powerpc/perf: consolidate read_user_stack_32

There are two almost identical copies for 32bit and 64bit.

The function is used only in 32bit code, which will be split out in the
next patch, so consolidate it into one function.

Signed-off-by: Michal Suchanek <[email protected]>
Reviewed-by: Christophe Leroy <[email protected]>
---
v6: new patch
v8: move the consolidated function out of the ifdef block.
---
arch/powerpc/perf/callchain.c | 59 +++++++++++++++--------------------
1 file changed, 25 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index c84bbd4298a0..d86bdbffda9e 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -165,22 +165,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
return read_user_stack_slow(ptr, ret, 8);
}

-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
-{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- pagefault_disable();
- if (!__get_user_inatomic(*ret, ptr)) {
- pagefault_enable();
- return 0;
- }
- pagefault_enable();
-
- return read_user_stack_slow(ptr, ret, 4);
-}
-
static inline int valid_user_sp(unsigned long sp, int is_64)
{
if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
@@ -295,25 +279,9 @@ static inline int current_is_64bit(void)
}

#else /* CONFIG_PPC64 */
-/*
- * On 32-bit we just access the address and let hash_page create a
- * HPTE if necessary, so there is no need to fall back to reading
- * the page tables. Since this is called at interrupt level,
- * do_page_fault() won't treat a DSI as a page fault.
- */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
- int rc;
-
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- pagefault_disable();
- rc = __get_user_inatomic(*ret, ptr);
- pagefault_enable();
-
- return rc;
+ return 0;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
@@ -341,6 +309,29 @@ static inline int valid_user_sp(unsigned long sp, int is_64)

#endif /* CONFIG_PPC64 */

+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+ int rc;
+
+ if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+ ((unsigned long)ptr & 3))
+ return -EFAULT;
+
+ pagefault_disable();
+ rc = __get_user_inatomic(*ret, ptr);
+ pagefault_enable();
+
+ if (IS_ENABLED(CONFIG_PPC64) && rc)
+ return read_user_stack_slow(ptr, ret, 4);
+ return rc;
+}
+
/*
* Layout for non-RT signal frames
*/
--
2.23.0
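
A note on the IS_ENABLED() use in the consolidated function above:
IS_ENABLED(CONFIG_PPC64) is a compile-time constant, so on PPC32 the
compiler constant-folds the branch and drops the read_user_stack_slow()
call while still type-checking it. That is why the PPC32 stub only has
to compile, never to run. A stand-alone illustration of the pattern
(hypothetical read_word()/slow_read() names, not from the patch):

  #include <stdio.h>

  #define MY_IS_ENABLED_PPC64 0     /* stands in for IS_ENABLED(CONFIG_PPC64) */

  static int slow_read(void)
  {
          return 0;                 /* stub: compiled, never executed */
  }

  static int read_word(int rc)
  {
          /* Constant-false condition: the compiler eliminates the
           * slow_read() call, but slow_read() must still type-check. */
          if (MY_IS_ENABLED_PPC64 && rc)
                  return slow_read();
          return rc;
  }

  int main(void)
  {
          printf("%d\n", read_word(-14));  /* prints -14; slow path skipped */
          return 0;
  }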