2013-07-30 01:14:48

by Jed Davis

Subject: [PATCH 1/2] perf: Fix handling of arch_perf_out_copy_user return value.

All architectures except x86 use __copy_from_user_inatomic to provide
arch_perf_out_copy_user; like the other copy_from routines, it returns
the number of bytes not copied. perf was expecting the number of bytes
that had been copied. This change corrects that, and thereby allows
PERF_SAMPLE_STACK_USER to be enabled on non-x86 architectures.
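
To illustrate the two conventions (a sketch with a hypothetical caller;
dst, usrc, and n are placeholders, only the copy routines themselves
are the real kernel APIs):

	/* bytes-NOT-copied convention: copy_from_user and friends;
	 * a nonzero return means the copy came up short: */
	if (__copy_from_user_inatomic(dst, usrc, n))
		return -EFAULT;

	/* bytes-copied convention: copy_from_user_nmi before this series;
	 * anything less than n means the copy came up short: */
	if (copy_from_user_nmi(dst, usrc, n) != n)
		return -EFAULT;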

x86 uses copy_from_user_nmi, which deviates from the other copy_from
routines by returning the number of bytes copied. (This cancels out
the effect of perf being backwards; apparently this code has only ever
been tested on x86.) This change therefore adds a second wrapper to
re-reverse it for perf; the next patch in this series will clean it up.

Signed-off-by: Jed Davis <[email protected]>
---
arch/x86/include/asm/perf_event.h |  9 ++++++++-
kernel/events/internal.h          | 11 ++++++++++-
2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8249df4..ddae5bd 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -274,6 +274,13 @@ static inline void perf_check_microcode(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

-#define arch_perf_out_copy_user copy_from_user_nmi
+static inline unsigned long copy_from_user_nmi_for_perf(void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ return n - copy_from_user_nmi(to, from, n);
+}
+
+#define arch_perf_out_copy_user copy_from_user_nmi_for_perf

#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index ca65997..e61b22c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -81,6 +81,7 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

+/* The memcpy_func must return the number of bytes successfully copied. */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
static inline unsigned int \
func_name(struct perf_output_handle *handle, \
@@ -122,11 +123,19 @@ DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

+/* arch_perf_out_copy_user must return the number of bytes not copied. */
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+static inline unsigned long perf_memcpy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ return n - arch_perf_out_copy_user(to, from, n);
+}
+
+DEFINE_OUTPUT_COPY(__output_copy_user, perf_memcpy_from_user)

/* Callchain handling */
extern struct perf_callchain_entry *
--
1.8.3.2


2013-07-30 01:14:55

by Jed Davis

Subject: [PATCH 2/2] x86: Fix copy_from_user_nmi return to match copy_from_user.

copy_from_user returns the number of bytes not copied. This change
makes copy_from_user_nmi behave the same way, instead of returning the
number of bytes that were copied, to help prevent bugs caused by this
surprising difference (and simplify callers, which mostly want to know
if the number of uncopied bytes is nonzero).
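
In sketch form, the caller pattern this enables (the hunks below make
exactly this change at each call site):

	/* before: compare the return value against the requested length */
	bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
	if (bytes != sizeof(frame))
		break;

	/* after: a nonzero return already means the copy came up short */
	if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
		break;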

Signed-off-by: Jed Davis <[email protected]>
---
arch/x86/include/asm/perf_event.h          | 9 +--------
arch/x86/kernel/cpu/perf_event.c           | 8 ++------
arch/x86/kernel/cpu/perf_event_intel_ds.c  | 6 ++----
arch/x86/kernel/cpu/perf_event_intel_lbr.c | 4 +---
arch/x86/lib/usercopy.c                    | 2 +-
arch/x86/oprofile/backtrace.c              | 8 ++------
6 files changed, 9 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index ddae5bd..8249df4 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -274,13 +274,6 @@ static inline void perf_check_microcode(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

-static inline unsigned long copy_from_user_nmi_for_perf(void *to,
- const void __user *from,
- unsigned long n)
-{
- return n - copy_from_user_nmi(to, from, n);
-}
-
-#define arch_perf_out_copy_user copy_from_user_nmi_for_perf
+#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a7c7305..038c18c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1983,12 +1983,10 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)

fp = compat_ptr(ss_base + regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;

- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
+ if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
break;

if (!valid_user_frame(fp, sizeof(frame)))
@@ -2035,12 +2033,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;

while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;

- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
+ if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
break;

if (!valid_user_frame(fp, sizeof(frame)))
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3065c57..5208fe1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -729,10 +729,8 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)

old_to = to;
if (!kernel_ip(ip)) {
- int bytes, size = MAX_INSN_SIZE;
-
- bytes = copy_from_user_nmi(buf, (void __user *)to, size);
- if (bytes != size)
+ if (copy_from_user_nmi(buf, (void __user *)to,
+ MAX_INSN_SIZE))
return 0;

kaddr = buf;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d5be06a..2833514 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -449,7 +449,6 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
{
struct insn insn;
void *addr;
- int bytes, size = MAX_INSN_SIZE;
int ret = X86_BR_NONE;
int ext, to_plm, from_plm;
u8 buf[MAX_INSN_SIZE];
@@ -477,8 +476,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
return X86_BR_NONE;

/* may fail if text not present */
- bytes = copy_from_user_nmi(buf, (void __user *)from, size);
- if (bytes != size)
+ if (copy_from_user_nmi(buf, (void __user *)from, MAX_INSN_SIZE))
return X86_BR_NONE;

addr = buf;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 4f74d94..7a13c98 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,6 +44,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)

} while (len < n);

- return len;
+ return n - len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index d6aa6e8..e778d41 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -44,10 +44,8 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame_ia32 bufhead[2];
struct stack_frame_ia32 *fp;
- unsigned long bytes;

- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
+ if (copy_from_user_nmi(bufhead, head, sizeof(bufhead)))
return NULL;

fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
@@ -90,10 +88,8 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame bufhead[2];
- unsigned long bytes;

- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
+ if (copy_from_user_nmi(bufhead, head, sizeof(bufhead)))
return NULL;

oprofile_add_trace(bufhead[0].return_address);
--
1.8.3.2

2013-07-30 13:22:07

by Robert Richter

Subject: Re: [PATCH 1/2] perf: Fix handling of arch_perf_out_copy_user return value.

On 29.07.13 18:12:40, Jed Davis wrote:
> All architectures except x86 use __copy_from_user_inatomic to provide
> arch_perf_out_copy_user; like the other copy_from routines, it returns
> the number of bytes not copied. perf was expecting the number of bytes
> that had been copied. This change corrects that, and thereby allows
> PERF_SAMPLE_STACK_USER to be enabled on non-x86 architectures.
>
> x86 uses copy_from_user_nmi, which deviates from the other copy_from
> routines by returning the number of bytes copied. (This cancels out
> the effect of perf being backwards; apparently this code has only ever
> been tested on x86.) This change therefore adds a second wrapper to
> re-reverse it for perf; the next patch in this series will clean it up.
>
> Signed-off-by: Jed Davis <[email protected]>
> ---
> arch/x86/include/asm/perf_event.h |  9 ++++++++-
> kernel/events/internal.h          | 11 ++++++++++-
> 2 files changed, 18 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index 8249df4..ddae5bd 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -274,6 +274,13 @@ static inline void perf_check_microcode(void) { }
> static inline void amd_pmu_disable_virt(void) { }
> #endif
>
> -#define arch_perf_out_copy_user copy_from_user_nmi
> +static inline unsigned long copy_from_user_nmi_for_perf(void *to,
> + const void __user *from,
> + unsigned long n)
> +{
> + return n - copy_from_user_nmi(to, from, n);
> +}
> +
> +#define arch_perf_out_copy_user copy_from_user_nmi_for_perf

I like your change of copy_from_user_nmi() to return the number of
bytes not copied, since it makes callers simpler and gives it the same
interface as the other copy functions.

Please do not introduce code that you later remove; instead, merge this
patch with your next one.

>
> #endif /* _ASM_X86_PERF_EVENT_H */
> diff --git a/kernel/events/internal.h b/kernel/events/internal.h
> index ca65997..e61b22c 100644
> --- a/kernel/events/internal.h
> +++ b/kernel/events/internal.h
> @@ -81,6 +81,7 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
> return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
> }
>
> +/* The memcpy_func must return the number of bytes successfully copied. */
> #define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
> static inline unsigned int \
> func_name(struct perf_output_handle *handle, \
> @@ -122,11 +123,19 @@ DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
>
> DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
>
> +/* arch_perf_out_copy_user must return the number of bytes not copied. */
> #ifndef arch_perf_out_copy_user
> #define arch_perf_out_copy_user __copy_from_user_inatomic
> #endif
>
> -DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
> +static inline unsigned long perf_memcpy_from_user(void *to,
> + const void __user *from,
> + unsigned long n)
> +{
> + return n - arch_perf_out_copy_user(to, from, n);
> +}
> +
> +DEFINE_OUTPUT_COPY(__output_copy_user, perf_memcpy_from_user)

Better to modify DEFINE_OUTPUT_COPY() so that memcpy_func() is expected
to return bytes-not-copied; the other users of DEFINE_OUTPUT_COPY()
could then be fixed up easily.
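
Concretely, the inner copy step in DEFINE_OUTPUT_COPY() might then look
something like this (a sketch of the suggestion, not tested):

	written = size - copy_func(handle->addr, buf, size); \

with the trivial copy functions returning bytes-not-copied as well, e.g.:

	static inline int memcpy_common(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;	/* plain memcpy() always copies everything */
	}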

-Robert

2013-08-16 23:56:20

by Jed Davis

Subject: [PATCH v2] x86, perf: Fix arch_perf_out_copy_user and copy_from_user_nmi return values

All architectures except x86 use __copy_from_user_inatomic to provide
arch_perf_out_copy_user; like the other copy_from routines, it returns
the number of bytes not copied. perf was expecting the number of bytes
that had been copied. This change corrects that, and thereby allows
PERF_SAMPLE_STACK_USER to be enabled on non-x86 architectures.

x86 uses copy_from_user_nmi, which deviates from the other copy_from
routines by returning the number of bytes copied. This change therefore
also reverses copy_from_user_nmi's return value, so that the perf change
doesn't break user stack copies on x86, and to help prevent bugs caused
by this surprising difference (and simplify callers, which mostly want
to know if the number of uncopied bytes is nonzero).

Signed-off-by: Jed Davis <[email protected]>
---
arch/x86/kernel/cpu/perf_event.c           | 8 ++------
arch/x86/kernel/cpu/perf_event_intel_ds.c  | 6 ++----
arch/x86/kernel/cpu/perf_event_intel_lbr.c | 4 +---
arch/x86/lib/usercopy.c                    | 2 +-
arch/x86/oprofile/backtrace.c              | 8 ++------
kernel/events/internal.h                   | 9 +++++----
6 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a7c7305..038c18c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1983,12 +1983,10 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)

fp = compat_ptr(ss_base + regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;

- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
+ if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
break;

if (!valid_user_frame(fp, sizeof(frame)))
@@ -2035,12 +2033,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;

while (entry->nr < PERF_MAX_STACK_DEPTH) {
- unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;

- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
- if (bytes != sizeof(frame))
+ if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
break;

if (!valid_user_frame(fp, sizeof(frame)))
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3065c57..5208fe1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -729,10 +729,8 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)

old_to = to;
if (!kernel_ip(ip)) {
- int bytes, size = MAX_INSN_SIZE;
-
- bytes = copy_from_user_nmi(buf, (void __user *)to, size);
- if (bytes != size)
+ if (copy_from_user_nmi(buf, (void __user *)to,
+ MAX_INSN_SIZE))
return 0;

kaddr = buf;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d5be06a..2833514 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -449,7 +449,6 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
{
struct insn insn;
void *addr;
- int bytes, size = MAX_INSN_SIZE;
int ret = X86_BR_NONE;
int ext, to_plm, from_plm;
u8 buf[MAX_INSN_SIZE];
@@ -477,8 +476,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
return X86_BR_NONE;

/* may fail if text not present */
- bytes = copy_from_user_nmi(buf, (void __user *)from, size);
- if (bytes != size)
+ if (copy_from_user_nmi(buf, (void __user *)from, MAX_INSN_SIZE))
return X86_BR_NONE;

addr = buf;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 4f74d94..7a13c98 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,6 +44,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)

} while (len < n);

- return len;
+ return n - len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index d6aa6e8..e778d41 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -44,10 +44,8 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame_ia32 bufhead[2];
struct stack_frame_ia32 *fp;
- unsigned long bytes;

- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
+ if (copy_from_user_nmi(bufhead, head, sizeof(bufhead)))
return NULL;

fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
@@ -90,10 +88,8 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame bufhead[2];
- unsigned long bytes;

- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
+ if (copy_from_user_nmi(bufhead, head, sizeof(bufhead)))
return NULL;

oprofile_add_trace(bufhead[0].return_address);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index ca65997..f77120f 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -81,7 +81,8 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
+/* func_name and copy_func return the number of bytes not copied */
+#define DEFINE_OUTPUT_COPY(func_name, copy_func) \
static inline unsigned int \
func_name(struct perf_output_handle *handle, \
const void *buf, unsigned int len) \
@@ -91,7 +92,7 @@ func_name(struct perf_output_handle *handle, \
do { \
size = min_t(unsigned long, handle->size, len); \
\
- written = memcpy_func(handle->addr, buf, size); \
+ written = size - copy_func(handle->addr, buf, size); \
\
len -= written; \
handle->addr += written; \
@@ -113,12 +114,12 @@ func_name(struct perf_output_handle *handle, \
static inline int memcpy_common(void *dst, const void *src, size_t n)
{
memcpy(dst, src, n);
- return n;
+ return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

-#define MEMCPY_SKIP(dst, src, n) (n)
+#define MEMCPY_SKIP(dst, src, n) (0)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

--
1.8.3.2