There are two almost identical copies of read_user_stack_32() for 32-bit and
64-bit.

The function is used only in 32-bit code, which will be split out in the next
patch, so consolidate the two copies into one function.
Signed-off-by: Michal Suchanek <[email protected]>
---
new patch in v6
---
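For reference, a sketch of what the consolidated read_user_stack_32() looks
like once the hunks below are applied. The lines above the hunk context (the
bounds check and the rc declaration) are not part of this diff and are
reconstructed from the removed 64-bit copy, so treat them as an approximation:

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        int rc;

        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        pagefault_disable();
        rc = __get_user_inatomic(*ret, ptr);
        pagefault_enable();

        /*
         * On 64-bit, retry a faulted access via the slow path that reads
         * the page tables.  On 32-bit, IS_ENABLED(CONFIG_PPC64) is false,
         * so the compiler drops this call and only a stub of
         * read_user_stack_slow() is needed to keep the code building.
         */
        if (IS_ENABLED(CONFIG_PPC64) && rc)
                return read_user_stack_slow(ptr, ret, 4);

        return rc;
}

The IS_ENABLED() test replaces the #ifdef duplication: both builds compile the
same body, and the 64-bit-only fallback folds away on 32-bit.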
arch/powerpc/perf/callchain.c | 25 +++++++++----------------
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index c84bbd4298a0..b7cdcce20280 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -165,22 +165,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	return read_user_stack_slow(ptr, ret, 8);
 }
 
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
-{
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
-	    ((unsigned long)ptr & 3))
-		return -EFAULT;
-
-	pagefault_disable();
-	if (!__get_user_inatomic(*ret, ptr)) {
-		pagefault_enable();
-		return 0;
-	}
-	pagefault_enable();
-
-	return read_user_stack_slow(ptr, ret, 4);
-}
-
 static inline int valid_user_sp(unsigned long sp, int is_64)
 {
 	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
@@ -295,6 +279,12 @@ static inline int current_is_64bit(void)
 }
 
 #else /* CONFIG_PPC64 */
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC64 */
+
 /*
  * On 32-bit we just access the address and let hash_page create a
  * HPTE if necessary, so there is no need to fall back to reading
@@ -313,9 +303,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	rc = __get_user_inatomic(*ret, ptr);
 	pagefault_enable();
 
+	if (IS_ENABLED(CONFIG_PPC64) && rc)
+		return read_user_stack_slow(ptr, ret, 4);
 	return rc;
 }
 
+#ifndef CONFIG_PPC64
 static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 					  struct pt_regs *regs)
 {
--
2.22.0
On 30/08/2019 at 20:57, Michal Suchanek wrote:
> There are two almost identical copies of read_user_stack_32() for 32-bit and
> 64-bit.
>
> The function is used only in 32-bit code, which will be split out in the next
> patch, so consolidate the two copies into one function.
>
> Signed-off-by: Michal Suchanek <[email protected]>
Reviewed-by: [email protected]