2019-02-28 15:07:52

by Peter Zijlstra

[permalink] [raw]
Subject: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
UACCESS context, and kasan_report() is most definitely _NOT_ safe to
be called from there, move it into an exception much like BUG/WARN.

*compile tested only*

Cc: Andrey Ryabinin <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/x86/include/asm/bug.h | 28 ++++++++++++++--------------
arch/x86/include/asm/kasan.h | 15 +++++++++++++++
include/asm-generic/bug.h | 1 +
include/linux/kasan.h | 12 +++++++++---
lib/bug.c | 9 ++++++++-
mm/kasan/generic.c | 4 ++--
mm/kasan/kasan.h | 2 +-
mm/kasan/report.c | 2 +-
8 files changed, 51 insertions(+), 22 deletions(-)

--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -30,33 +30,33 @@

#ifdef CONFIG_DEBUG_BUGVERBOSE

-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, ...) \
do { \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
- "\t.word %c1" "\t# bug_entry::line\n" \
- "\t.word %c2" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c3\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(%c[file]) "\t# bug_entry::file\n" \
+ "\t.word %c[line]" "\t# bug_entry::line\n" \
+ "\t.word %c[flag]" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c[size]\n" \
".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
+ : : [file] "i" (__FILE__), [line] "i" (__LINE__), \
+ [flag] "i" (flags), \
+ [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
} while (0)

#else /* !CONFIG_DEBUG_BUGVERBOSE */

-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, ...) \
do { \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t.word %c0" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c1\n" \
+ "\t.word %c[flag]" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c[size]\n" \
".popsection" \
- : : "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
+ : : [flag] "i" (flags), \
+ [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
} while (0)

#endif /* CONFIG_DEBUG_BUGVERBOSE */
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_KASAN_H
#define _ASM_X86_KASAN_H

+#include <asm/bug.h>
+
#include <linux/const.h>
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_SCALE_SHIFT 3
@@ -26,8 +28,21 @@
#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN
+
void __init kasan_early_init(void);
void __init kasan_init(void);
+
+static __always_inline void
+kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+ unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
+
+ _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
+ "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
+ annotate_reachable();
+}
+#define kasan_report kasan_report
+
#else
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_KASAN (1 << 3)
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,9 @@ size_t kasan_metadata_size(struct kmem_c
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

+void __kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -153,8 +156,14 @@ static inline void kasan_remove_zero_sha
static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }

+static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
+
#endif /* CONFIG_KASAN */

+#ifndef kasan_report
+#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
+#endif
+
#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SHADOW_INIT 0
@@ -177,9 +186,6 @@ void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);

-void kasan_report(unsigned long addr, size_t size,
- bool is_write, unsigned long ip);
-
#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
+#include <linux/kasan.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];

@@ -144,7 +145,7 @@ enum bug_trap_type report_bug(unsigned l
{
struct bug_entry *bug;
const char *file;
- unsigned line, warning, once, done;
+ unsigned line, warning, once, done, kasan;

if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
@@ -167,6 +168,7 @@ enum bug_trap_type report_bug(unsigned l
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ kasan = (bug->flags & BUGFLAG_KASAN) != 0;
once = (bug->flags & BUGFLAG_ONCE) != 0;
done = (bug->flags & BUGFLAG_DONE) != 0;

@@ -188,6 +190,11 @@ enum bug_trap_type report_bug(unsigned l
return BUG_TRAP_TYPE_WARN;
}

+ if (kasan) {
+ __kasan_report(regs->di, regs->si, regs->dx, regs->cx);
+ return BUG_TRAP_TYPE_WARN;
+ }
+
printk(KERN_DEFAULT CUT_HERE);

if (file)
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -228,7 +228,7 @@ void __asan_unregister_globals(struct ka
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size) \
- void __asan_load##size(unsigned long addr) \
+ notrace void __asan_load##size(unsigned long addr) \
{ \
check_memory_region_inline(addr, size, false, _RET_IP_);\
} \
@@ -236,7 +236,7 @@ EXPORT_SYMBOL(__asan_unregister_globals)
__alias(__asan_load##size) \
void __asan_load##size##_noabort(unsigned long); \
EXPORT_SYMBOL(__asan_load##size##_noabort); \
- void __asan_store##size(unsigned long addr) \
+ notrace void __asan_store##size(unsigned long addr) \
{ \
check_memory_region_inline(addr, size, true, _RET_IP_); \
} \
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -130,7 +130,7 @@ void check_memory_region(unsigned long a
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);

-void kasan_report(unsigned long addr, size_t size,
+void __kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);

--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
end_report(&flags);
}

-void kasan_report(unsigned long addr, size_t size,
+void __kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip)
{
struct kasan_access_info info;




2019-02-28 15:23:44

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
>
> Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> be called from there, move it into an exception much like BUG/WARN.
>
> *compile tested only*


Please test it by booting KASAN kernel and then loading module
produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
rely on "compile tested only", reviewers can't catch all of them
either.


> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> ---
> arch/x86/include/asm/bug.h | 28 ++++++++++++++--------------
> arch/x86/include/asm/kasan.h | 15 +++++++++++++++
> include/asm-generic/bug.h | 1 +
> include/linux/kasan.h | 12 +++++++++---
> lib/bug.c | 9 ++++++++-
> mm/kasan/generic.c | 4 ++--
> mm/kasan/kasan.h | 2 +-
> mm/kasan/report.c | 2 +-
> 8 files changed, 51 insertions(+), 22 deletions(-)
>
> --- a/arch/x86/include/asm/bug.h
> +++ b/arch/x86/include/asm/bug.h
> @@ -30,33 +30,33 @@
>
> #ifdef CONFIG_DEBUG_BUGVERBOSE
>
> -#define _BUG_FLAGS(ins, flags) \
> +#define _BUG_FLAGS(ins, flags, ...) \
> do { \
> asm volatile("1:\t" ins "\n" \
> ".pushsection __bug_table,\"aw\"\n" \
> - "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> - "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
> - "\t.word %c1" "\t# bug_entry::line\n" \
> - "\t.word %c2" "\t# bug_entry::flags\n" \
> - "\t.org 2b+%c3\n" \
> + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> + "\t" __BUG_REL(%c[file]) "\t# bug_entry::file\n" \
> + "\t.word %c[line]" "\t# bug_entry::line\n" \
> + "\t.word %c[flag]" "\t# bug_entry::flags\n" \
> + "\t.org 2b+%c[size]\n" \
> ".popsection" \
> - : : "i" (__FILE__), "i" (__LINE__), \
> - "i" (flags), \
> - "i" (sizeof(struct bug_entry))); \
> + : : [file] "i" (__FILE__), [line] "i" (__LINE__), \
> + [flag] "i" (flags), \
> + [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
> } while (0)
>
> #else /* !CONFIG_DEBUG_BUGVERBOSE */
>
> -#define _BUG_FLAGS(ins, flags) \
> +#define _BUG_FLAGS(ins, flags, ...) \
> do { \
> asm volatile("1:\t" ins "\n" \
> ".pushsection __bug_table,\"aw\"\n" \
> "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> - "\t.word %c0" "\t# bug_entry::flags\n" \
> - "\t.org 2b+%c1\n" \
> + "\t.word %c[flag]" "\t# bug_entry::flags\n" \
> + "\t.org 2b+%c[size]\n" \
> ".popsection" \
> - : : "i" (flags), \
> - "i" (sizeof(struct bug_entry))); \
> + : : [flag] "i" (flags), \
> + [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
> } while (0)
>
> #endif /* CONFIG_DEBUG_BUGVERBOSE */
> --- a/arch/x86/include/asm/kasan.h
> +++ b/arch/x86/include/asm/kasan.h
> @@ -2,6 +2,8 @@
> #ifndef _ASM_X86_KASAN_H
> #define _ASM_X86_KASAN_H
>
> +#include <asm/bug.h>
> +
> #include <linux/const.h>
> #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
> #define KASAN_SHADOW_SCALE_SHIFT 3
> @@ -26,8 +28,21 @@
> #ifndef __ASSEMBLY__
>
> #ifdef CONFIG_KASAN
> +
> void __init kasan_early_init(void);
> void __init kasan_init(void);
> +
> +static __always_inline void
> +kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> +{
> + unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
> +
> + _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
> + "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));

Can BUG return? This should be able to return.
We also have other tools coming (KMSAN/KTSAN) where distinction
between fast path that does nothing and slower-paths are very blurred
and there are dozens of them, I don't think this BUG thunk will be
sustainable. What does BUG do what a normal call can't do?


> + annotate_reachable();
> +}
> +#define kasan_report kasan_report
> +
> #else
> static inline void kasan_early_init(void) { }
> static inline void kasan_init(void) { }
> --- a/include/asm-generic/bug.h
> +++ b/include/asm-generic/bug.h
> @@ -10,6 +10,7 @@
> #define BUGFLAG_WARNING (1 << 0)
> #define BUGFLAG_ONCE (1 << 1)
> #define BUGFLAG_DONE (1 << 2)
> +#define BUGFLAG_KASAN (1 << 3)
> #define BUGFLAG_TAINT(taint) ((taint) << 8)
> #define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
> #endif
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -83,6 +83,9 @@ size_t kasan_metadata_size(struct kmem_c
> bool kasan_save_enable_multi_shot(void);
> void kasan_restore_multi_shot(bool enabled);
>
> +void __kasan_report(unsigned long addr, size_t size,
> + bool is_write, unsigned long ip);
> +
> #else /* CONFIG_KASAN */
>
> static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
> @@ -153,8 +156,14 @@ static inline void kasan_remove_zero_sha
> static inline void kasan_unpoison_slab(const void *ptr) { }
> static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> +static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
> +
> #endif /* CONFIG_KASAN */
>
> +#ifndef kasan_report
> +#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
> +#endif
> +
> #ifdef CONFIG_KASAN_GENERIC
>
> #define KASAN_SHADOW_INIT 0
> @@ -177,9 +186,6 @@ void kasan_init_tags(void);
>
> void *kasan_reset_tag(const void *addr);
>
> -void kasan_report(unsigned long addr, size_t size,
> - bool is_write, unsigned long ip);
> -
> #else /* CONFIG_KASAN_SW_TAGS */
>
> static inline void kasan_init_tags(void) { }
> --- a/lib/bug.c
> +++ b/lib/bug.c
> @@ -47,6 +47,7 @@
> #include <linux/bug.h>
> #include <linux/sched.h>
> #include <linux/rculist.h>
> +#include <linux/kasan.h>
>
> extern struct bug_entry __start___bug_table[], __stop___bug_table[];
>
> @@ -144,7 +145,7 @@ enum bug_trap_type report_bug(unsigned l
> {
> struct bug_entry *bug;
> const char *file;
> - unsigned line, warning, once, done;
> + unsigned line, warning, once, done, kasan;
>
> if (!is_valid_bugaddr(bugaddr))
> return BUG_TRAP_TYPE_NONE;
> @@ -167,6 +168,7 @@ enum bug_trap_type report_bug(unsigned l
> line = bug->line;
> #endif
> warning = (bug->flags & BUGFLAG_WARNING) != 0;
> + kasan = (bug->flags & BUGFLAG_KASAN) != 0;
> once = (bug->flags & BUGFLAG_ONCE) != 0;
> done = (bug->flags & BUGFLAG_DONE) != 0;
>
> @@ -188,6 +190,11 @@ enum bug_trap_type report_bug(unsigned l
> return BUG_TRAP_TYPE_WARN;
> }
>
> + if (kasan) {
> + __kasan_report(regs->di, regs->si, regs->dx, regs->cx);
> + return BUG_TRAP_TYPE_WARN;
> + }
> +
> printk(KERN_DEFAULT CUT_HERE);
>
> if (file)
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -228,7 +228,7 @@ void __asan_unregister_globals(struct ka
> EXPORT_SYMBOL(__asan_unregister_globals);
>
> #define DEFINE_ASAN_LOAD_STORE(size) \
> - void __asan_load##size(unsigned long addr) \
> + notrace void __asan_load##size(unsigned long addr) \


We already have:
CFLAGS_REMOVE_generic.o = -pg
Doesn't it imply notrace for all functions?



> { \
> check_memory_region_inline(addr, size, false, _RET_IP_);\
> } \
> @@ -236,7 +236,7 @@ EXPORT_SYMBOL(__asan_unregister_globals)
> __alias(__asan_load##size) \
> void __asan_load##size##_noabort(unsigned long); \
> EXPORT_SYMBOL(__asan_load##size##_noabort); \
> - void __asan_store##size(unsigned long addr) \
> + notrace void __asan_store##size(unsigned long addr) \
> { \
> check_memory_region_inline(addr, size, true, _RET_IP_); \
> } \
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -130,7 +130,7 @@ void check_memory_region(unsigned long a
> void *find_first_bad_addr(void *addr, size_t size);
> const char *get_bug_type(struct kasan_access_info *info);
>
> -void kasan_report(unsigned long addr, size_t size,
> +void __kasan_report(unsigned long addr, size_t size,
> bool is_write, unsigned long ip);
> void kasan_report_invalid_free(void *object, unsigned long ip);
>
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
> end_report(&flags);
> }
>
> -void kasan_report(unsigned long addr, size_t size,
> +void __kasan_report(unsigned long addr, size_t size,
> bool is_write, unsigned long ip)
> {
> struct kasan_access_info info;
>
>

2019-02-28 15:47:20

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
> On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
> >
> > Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> > UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> > be called from there, move it into an exception much like BUG/WARN.
> >
> > *compile tested only*
>
>
> Please test it by booting KASAN kernel and then loading module
> produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
> rely on "compile tested only", reviewers can't catch all of them
> either.

Sure, I'll do that. I just wanted to share the rest of the patches.

A quick test shows it dies _REAAAAAAAALY_ early, as in:

"Booting the kernel."

is the first and very last thing it says... I wonder how I did that :-)

> > +static __always_inline void
> > +kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> > +{
> > + unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
> > +
> > + _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
> > + "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
>
> Can BUG return?

Yes. Also see the annotate_reachable().

> This should be able to return.
> We also have other tools coming (KMSAN/KTSAN) where distinction
> between fast path that does nothing and slower-paths are very blurred
> and there are dozens of them, I don't think this BUG thunk will be
> sustainable. What does BUG do what a normal call can't do?

It keeps the SMAP validation rules nice and tight. If we were to add
(and allow) things like pushf;clac;call ponies;popf or similar things,
it all becomes complicated real quick.

How would KMSAN/KTSAN interact with SMAP ?

> > + annotate_reachable();
> > +}
> > @@ -228,7 +228,7 @@ void __asan_unregister_globals(struct ka
> > EXPORT_SYMBOL(__asan_unregister_globals);
> >
> > #define DEFINE_ASAN_LOAD_STORE(size) \
> > - void __asan_load##size(unsigned long addr) \
> > + notrace void __asan_load##size(unsigned long addr) \
>
>
> We already have:
> CFLAGS_REMOVE_generic.o = -pg
> Doesn't it imply notrace for all functions?

Indeed so, I'll make these hunks go away.

2019-02-28 15:54:30

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 4:46 PM Peter Zijlstra <[email protected]> wrote:
>
> On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
> > On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
> > >
> > > Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> > > UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> > > be called from there, move it into an exception much like BUG/WARN.
> > >
> > > *compile tested only*
> >
> >
> > Please test it by booting KASAN kernel and then loading module
> > produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
> > rely on "compile tested only", reviewers can't catch all of them
> > either.
>
> Sure, I'll do that. I just wanted to share the rest of the patches.
>
> A quick test shows it dies _REAAAAAAAALY_ early, as in:
>
> "Booting the kernel."
>
> is the first and very last thing it says... I wonder how I did that :-)

One thing is that during early boot kasan_report is called multiple
times, but these are false positives related to the fact that we don't
have a proper shadow yet (setup later). So during early boot we set
kasan_disable=1 (or some global or per-task flag), and then
kasan_report checks it and returns.
Once we setup proper shadow, the flag is reset and from now on
kasan_report actually reports bug.


> > > +static __always_inline void
> > > +kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> > > +{
> > > + unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
> > > +
> > > + _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
> > > + "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
> >
> > Can BUG return?
>
> Yes. Also see the annotate_reachable().
>
> > This should be able to return.
> > We also have other tools coming (KMSAN/KTSAN) where distinction
> > between fast path that does nothing and slower-paths are very blurred
> > and there are dozens of them, I don't think this BUG thunk will be
> > sustainable. What does BUG do what a normal call can't do?
>
> It keeps the SMAP validation rules nice and tight. If we were to add
> (and allow) things like pushf;clac;call ponies;popf or similar things,
> it all becomes complicated real quick.
>
> How would KMSAN/KTSAN interact with SMAP ?
>
> > > + annotate_reachable();
> > > +}
> > > @@ -228,7 +228,7 @@ void __asan_unregister_globals(struct ka
> > > EXPORT_SYMBOL(__asan_unregister_globals);
> > >
> > > #define DEFINE_ASAN_LOAD_STORE(size) \
> > > - void __asan_load##size(unsigned long addr) \
> > > + notrace void __asan_load##size(unsigned long addr) \
> >
> >
> > We already have:
> > CFLAGS_REMOVE_generic.o = -pg
> > Doesn't it imply notrace for all functions?
>
> Indeed so, I'll make these hunks go away.

2019-02-28 16:02:43

by Andrey Ryabinin

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception



On 2/28/19 6:52 PM, Dmitry Vyukov wrote:
> On Thu, Feb 28, 2019 at 4:46 PM Peter Zijlstra <[email protected]> wrote:
>>
>> On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
>>> On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
>>>>
>>>> Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
>>>> UACCESS context, and kasan_report() is most definitely _NOT_ safe to
>>>> be called from there, move it into an exception much like BUG/WARN.
>>>>
>>>> *compile tested only*
>>>
>>>
>>> Please test it by booting KASAN kernel and then loading module
>>> produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
>>> rely on "compile tested only", reviewers can't catch all of them
>>> either.
>>
>> Sure, I'll do that. I just wanted to share the rest of the patches.
>>
>> A quick test shows it dies _REAAAAAAAALY_ early, as in:
>>
>> "Booting the kernel."
>>
>> is the first and very last thing it says... I wonder how I did that :-)
>
> One thing is that during early boot kasan_report is called multiple
> times, but these are false positives related to the fact that we don't
> have a proper shadow yet (setup later). So during early boot we set
> kasan_disable=1 (or some global or per-task flag), and then
> kasan_report checks it and returns.
> Once we setup proper shadow, the flag is reset and from now on
> kasan_report actually reports bug.
>

Yup, see report_enabled() function.

2019-02-28 18:05:58

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 4:46 PM Peter Zijlstra <[email protected]> wrote:
>
> On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
> > On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
> > >
> > > Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> > > UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> > > be called from there, move it into an exception much like BUG/WARN.
> > >
> > > *compile tested only*
> >
> >
> > Please test it by booting KASAN kernel and then loading module
> > produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
> > rely on "compile tested only", reviewers can't catch all of them
> > either.
>
> Sure, I'll do that. I just wanted to share the rest of the patches.
>
> A quick test shows it dies _REAAAAAAAALY_ early, as in:
>
> "Booting the kernel."
>
> is the first and very last thing it says... I wonder how I did that :-)
>
> > > +static __always_inline void
> > > +kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> > > +{
> > > + unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
> > > +
> > > + _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
> > > + "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
> >
> > Can BUG return?
>
> Yes. Also see the annotate_reachable().
>
> > This should be able to return.
> > We also have other tools coming (KMSAN/KTSAN) where distinction
> > between fast path that does nothing and slower-paths are very blurred
> > and there are dozens of them, I don't think this BUG thunk will be
> > sustainable. What does BUG do what a normal call can't do?
>
> It keeps the SMAP validation rules nice and tight. If we were to add
> (and allow) things like pushf;clac;call ponies;popf or similar things,
> it all becomes complicated real quick.
>
> How would KMSAN/KTSAN interact with SMAP ?


I am missing some knowledge about SMAP to answer this.
In short, these tools insert lots of callbacks into runtime for memory
accesses, function entry/exit, atomicops and some other. These
callbacks can do things of different complexity.
Humm... perhaps we could just disable SMAP for KMSAN/KTSAN. It's
possible, right? If we have it enabled with KASAN, that should be
enough.

And the question from another thread: does SMAP detect bugs that KASAN
can't? If SMAP is not useful during stress testing/fuzzing, then we
could also disable it. But if it finds some bugs that KASAN won't
detect, then it would be pity to disable it.

Also, what's the actual problem with KASAN+SMAP? Is it warnings from
static analysis tool? Or there are also some runtime effects? What
effects?
Is it possible to disable the SMAP runtime checks once we enter
kasan_report() past report_enabled() check? We could restrict it to
"just finish printing this bug report whatever it takes and then
whatever" if it makes things simpler.
It would be nice if we could restrict it to something like:

@@ -291,6 +303,7 @@ void kasan_report(unsigned long addr, size_t size,
if (likely(!report_enabled()))
return;
+ disable_smap();

And then enforce panic at the end of report if smap is enabled.


> > > + annotate_reachable();
> > > +}
> > > @@ -228,7 +228,7 @@ void __asan_unregister_globals(struct ka
> > > EXPORT_SYMBOL(__asan_unregister_globals);
> > >
> > > #define DEFINE_ASAN_LOAD_STORE(size) \
> > > - void __asan_load##size(unsigned long addr) \
> > > + notrace void __asan_load##size(unsigned long addr) \
> >
> >
> > We already have:
> > CFLAGS_REMOVE_generic.o = -pg
> > Doesn't it imply notrace for all functions?
>
> Indeed so, I'll make these hunks go away.

2019-02-28 18:37:39

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 05:03:09PM +0100, Dmitry Vyukov wrote:

> I am missing some knowledge about SMAP to answer this.
> In short, these tools insert lots of callbacks into runtime for memory
> accesses, function entry/exit, atomicops and some other. These
> callbacks can do things of different complexity.
> Humm... perhaps we could just disable SMAP for KMSAN/KTSAN. It's
> possible, right? If we have it enabled with KASAN, that should be
> enough.

SMAP detects access to _PAGE_USER pages; that is, such access is only
allowed when EFLAGS.AC=1, otherwise they'll fault.

I again don't know enough about KASAN to say if it does that; but I
suspect it only tracks kernel memory state.

> Also, what's the actual problem with KASAN+SMAP? Is it warnings from
> static analysis tool? Or there are also some runtime effects? What
> effects?

Both; so because of the above semantics, things like copy_to_user() will
have to do STAC (set EFLAGS.AC=1), then do the actual copies to the user
addresses, and then CLAC (clear the AC flag again).

The desire is to have AC=1 sections as small as possible, such that as
much code as possible is ran with AC=0 and will trap on unintended
accesses.

Also; the scheduler doesn't (but I have a patch for that, but I'd prefer
to not have to use it) context switch EFLAGS. This means that if we land
in the scheduler while AC=1, the next task will resume with AC=1.

Consequently, if that task returns to userspace before it gets scheduled
again, we'll continue our previous task (that left with AC=1) with AC=0
and it'll then fault where no fault was expected.

Anyway; the objtool annotation basically tracks the EFLAGS.AC state
(through STAC/CLAC instructions -- no PUSHF/POPF) and disallows any
CALL/RET while AC=1.

This is where the __asan_{load,store}*() stuff went *splat*. GCC inserts
those calls in the middle of STAC/CLAC (AC=1) and we then have to mark
the functions as AC-safe. objtool validates those on the same rules, no
further CALLs that are not also safe.

Things like __fentry__ are inherently unsafe because they use
preempt_disable/preempt_enable, where the latter has a CALL
__preempt_schedule (and is thus very unsafe). Similarly with
kasan_report(), it does all sorts of things that are not safe to do.

> Is it possible to disable the SMAP runtime checks once we enter
> kasan_report() past report_enabled() check? We could restrict it to
> "just finish printing this bug report whatever it takes and then
> whatever" if it makes things simpler.
> It would be nice if we could restrict it to something like:
>
> @@ -291,6 +303,7 @@ void kasan_report(unsigned long addr, size_t size,
> if (likely(!report_enabled()))
> return;
> + disable_smap();
>
> And then enforce panic at the end of report if smap is enabled.

That would be a CLAC, and the current rules disallow CLAC for AC-safe
functions.

Furthermore, kasan_report() isn't fatal, right? So it would have to
restore the state on exit. That makes the validation state much more
complicated.

Let me try and frob some of the report_enabled() stuff before the #UD.

2019-02-28 18:45:33

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 6:46 PM Peter Zijlstra <[email protected]> wrote:
>
> On Thu, Feb 28, 2019 at 05:03:09PM +0100, Dmitry Vyukov wrote:
>
> > I am missing some knowledge about SMAP to answer this.
> > In short, these tools insert lots of callbacks into runtime for memory
> > accesses, function entry/exit, atomicops and some other. These
> > callbacks can do things of different complexity.
> > Humm... perhaps we could just disable SMAP for KMSAN/KTSAN. It's
> > possible, right? If we have it enabled with KASAN, that should be
> > enough.
>
> SMAP detects access to _PAGE_USER pages; that is, such access is only
> allowed when EFLAGS.AC=1, otherwise they'll fault.
>
> I again don't know enough about KASAN to say if it does that; but I
> suspect it only tracks kernel memory state.
>
> > Also, what's the actual problem with KASAN+SMAP? Is it warnings from
> > static analysis tool? Or there are also some runtime effects? What
> > effects?
>
> Both; so because of the above semantics, things like copy_to_user() will
> have to do STAC (set EFLAGS.AC=1), then do the actual copies to the user
> addresses, and then CLAC (clear the AC flag again).
>
> The desire is to have AC=1 sections as small as possible, such that as
> much code as possible is ran with AC=0 and will trap on unintended
> accesses.
>
> Also; the scheduler doesn't (but I have a patch for that, but I'd prefer
> to not have to use it) context switch EFLAGS. This means that if we land
> in the scheduler while AC=1, the next task will resume with AC=1.
>
> Consequently, if that task returns to userspace before it gets scheduled
> again, we'll continue our previous task (that left with AC=1) with AC=0
> and it'll then fault where no fault was expected.
>
> Anyway; the objtool annotation basically tracks the EFLAGS.AC state
> (through STAC/CLAC instructions -- no PUSHF/POPF) and disallows any
> CALL/RET while AC=1.
>
> This is where the __asan_{load,store}*() stuff went *splat*. GCC inserts
> those calls in the middle of STAC/CLAC (AC=1) and we then have to mark
> the functions as AC-safe. objtool validates those on the same rules, no
> further CALLs that are not also safe.
>
> Things like __fentry__ are inherently unsafe because they use
> preempt_disable/preempt_enable, where the latter has a CALL
> __preempt_schedule (and is thus very unsafe). Similarly with
> kasan_report(), it does all sorts of things that are not safe to do.
>
> > Is it possible to disable the SMAP runtime checks once we enter
> > kasan_report() past report_enabled() check? We could restrict it to
> > "just finish printing this bug report whatever it takes and then
> > whatever" if it makes things simpler.
> > It would be nice if we could restrict it to something like:
> >
> > @@ -291,6 +303,7 @@ void kasan_report(unsigned long addr, size_t size,
> > if (likely(!report_enabled()))
> > return;
> > + disable_smap();
> >
> > And then enforce panic at the end of report if smap is enabled.
>
> That would be a CLAC, and the current rules disallow CLAC for AC-safe
> functions.
>
> Furthermore, kasan_report() isn't fatal, right? So it would have to
> restore the state on exit. That makes the validation state much more
> complicated.
>
> Let me try and frob some of the report_enabled() stuff before the #UD.

What if we do CLAC at the beginning of kasan_report, and STAC at the end
if AC was set on entry. And then special-case kasan_report in the static
tool? This looks like a reasonable compromise to me taking into
account that KASAN is not enabled in production builds, so special
casing it in the static tool won't lead to missed bugs in production
code. That's effectively what _BUG_FLAGS will do, right? But just
without involving asm.
Or, perhaps, wrap it into something like:
void smap_call(void (*fn)(void* ctx), void *ctx);
that will do all of this. And then the tool only needs to know about
the smap_call?

2019-03-01 14:54:20

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
> On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
> >
> > Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> > UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> > be called from there, move it into an exception much like BUG/WARN.
> >
> > *compile tested only*
>
>
> Please test it by booting KASAN kernel and then loading module
> produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
> rely on "compile tested only", reviewers can't catch all of them
> either.

The below boots and survives test_kasan.

I'll now try that annotation you preferred. But I think this is a pretty
neat hack :-)

---
Subject: kasan,x86: Frob kasan_report() in an exception
From: Peter Zijlstra <[email protected]>
Date: Thu Feb 28 15:52:03 CET 2019

Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
UACCESS context, and kasan_report() is most definitely _NOT_ safe to
be called from there, move it into an exception much like BUG/WARN.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/x86/include/asm/bug.h | 28 ++++++++++++++--------------
arch/x86/include/asm/kasan.h | 17 +++++++++++++++++
include/asm-generic/bug.h | 1 +
include/linux/kasan.h | 12 +++++++++---
lib/bug.c | 9 ++++++++-
mm/kasan/kasan.h | 2 +-
mm/kasan/report.c | 2 +-
7 files changed, 51 insertions(+), 20 deletions(-)

--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -30,33 +30,33 @@

#ifdef CONFIG_DEBUG_BUGVERBOSE

-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, ...) \
do { \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
- "\t.word %c1" "\t# bug_entry::line\n" \
- "\t.word %c2" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c3\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(%c[file]) "\t# bug_entry::file\n" \
+ "\t.word %c[line]" "\t# bug_entry::line\n" \
+ "\t.word %c[flag]" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c[size]\n" \
".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
+ : : [file] "i" (__FILE__), [line] "i" (__LINE__), \
+ [flag] "i" (flags), \
+ [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
} while (0)

#else /* !CONFIG_DEBUG_BUGVERBOSE */

-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, ...) \
do { \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t.word %c0" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c1\n" \
+ "\t.word %c[flag]" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c[size]\n" \
".popsection" \
- : : "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
+ : : [flag] "i" (flags), \
+ [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
} while (0)

#endif /* CONFIG_DEBUG_BUGVERBOSE */
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -2,7 +2,10 @@
#ifndef _ASM_X86_KASAN_H
#define _ASM_X86_KASAN_H

+#include <asm/bug.h>
+
#include <linux/const.h>
+#include <linux/sched.h>
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_SCALE_SHIFT 3

@@ -26,8 +29,22 @@
#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN
+
void __init kasan_early_init(void);
void __init kasan_init(void);
+
+static __always_inline void
+kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+ if (!current->kasan_depth) {
+ unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
+ _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
+ "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
+ annotate_reachable();
+ }
+}
+#define kasan_report kasan_report
+
#else
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_KASAN (1 << 3)
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,9 @@ size_t kasan_metadata_size(struct kmem_c
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

+void __kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -153,8 +156,14 @@ static inline void kasan_remove_zero_sha
static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }

+static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
+
#endif /* CONFIG_KASAN */

+#ifndef kasan_report
+#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
+#endif
+
#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SHADOW_INIT 0
@@ -177,9 +186,6 @@ void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);

-void kasan_report(unsigned long addr, size_t size,
- bool is_write, unsigned long ip);
-
#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
+#include <linux/kasan.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];

@@ -144,7 +145,7 @@ enum bug_trap_type report_bug(unsigned l
{
struct bug_entry *bug;
const char *file;
- unsigned line, warning, once, done;
+ unsigned line, warning, once, done, kasan;

if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
@@ -167,6 +168,7 @@ enum bug_trap_type report_bug(unsigned l
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ kasan = (bug->flags & BUGFLAG_KASAN) != 0;
once = (bug->flags & BUGFLAG_ONCE) != 0;
done = (bug->flags & BUGFLAG_DONE) != 0;

@@ -188,6 +190,11 @@ enum bug_trap_type report_bug(unsigned l
return BUG_TRAP_TYPE_WARN;
}

+ if (kasan) {
+ __kasan_report(regs->di, regs->si, regs->dx, regs->cx);
+ return BUG_TRAP_TYPE_WARN;
+ }
+
printk(KERN_DEFAULT CUT_HERE);

if (file)
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -130,7 +130,7 @@ void check_memory_region(unsigned long a
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);

-void kasan_report(unsigned long addr, size_t size,
+void __kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);

--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
end_report(&flags);
}

-void kasan_report(unsigned long addr, size_t size,
+void __kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip)
{
struct kasan_access_info info;

2019-03-01 15:07:50

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Fri, Mar 1, 2019 at 3:46 PM Peter Zijlstra <[email protected]> wrote:
>
> On Thu, Feb 28, 2019 at 04:22:04PM +0100, Dmitry Vyukov wrote:
> > On Thu, Feb 28, 2019 at 4:05 PM Peter Zijlstra <[email protected]> wrote:
> > >
> > > Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> > > UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> > > be called from there, move it into an exception much like BUG/WARN.
> > >
> > > *compile tested only*
> >
> >
> > Please test it by booting KASAN kernel and then loading module
> > produced by CONFIG_TEST_KASAN=y. There are too many subtle aspects to
> > rely on "compile tested only", reviewers can't catch all of them
> > either.
>
> The below boots and survives test_kasan.
>
> I'll now try that annotation you preferred. But I think this is a pretty
> neat hack :-)

Yes, it's "neat" but it's also a "hack" :)

It involves asm, hardware exceptions, UD2 instructions. It also seems
to use arch-dependent code in arch-independent files: there is no RSI
on other arches, does this compile on non-x86? I understand you are
pretty comfortable with such code, but all else being equal normal C
code is preferable.
And it's not that we gain much due to this, we are merely working
around things. We tried to use UD2 for asan reports, but we emitted it
into the generated code where it had a chance of speeding up things
which could potentially justify hacks. But even there the final
decision was to go with normal calls.


> ---
> Subject: kasan,x86: Frob kasan_report() in an exception
> From: Peter Zijlstra <[email protected]>
> Date: Thu Feb 28 15:52:03 CET 2019
>
> Because __asan_{load,store}{N,1,2,4,8,16}_noabort() get called from
> UACCESS context, and kasan_report() is most definitely _NOT_ safe to
> be called from there, move it into an exception much like BUG/WARN.
>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> ---
> arch/x86/include/asm/bug.h | 28 ++++++++++++++--------------
> arch/x86/include/asm/kasan.h | 17 +++++++++++++++++
> include/asm-generic/bug.h | 1 +
> include/linux/kasan.h | 12 +++++++++---
> lib/bug.c | 9 ++++++++-
> mm/kasan/kasan.h | 2 +-
> mm/kasan/report.c | 2 +-
> 7 files changed, 51 insertions(+), 20 deletions(-)
>
> --- a/arch/x86/include/asm/bug.h
> +++ b/arch/x86/include/asm/bug.h
> @@ -30,33 +30,33 @@
>
> #ifdef CONFIG_DEBUG_BUGVERBOSE
>
> -#define _BUG_FLAGS(ins, flags) \
> +#define _BUG_FLAGS(ins, flags, ...) \
> do { \
> asm volatile("1:\t" ins "\n" \
> ".pushsection __bug_table,\"aw\"\n" \
> - "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> - "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
> - "\t.word %c1" "\t# bug_entry::line\n" \
> - "\t.word %c2" "\t# bug_entry::flags\n" \
> - "\t.org 2b+%c3\n" \
> + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> + "\t" __BUG_REL(%c[file]) "\t# bug_entry::file\n" \
> + "\t.word %c[line]" "\t# bug_entry::line\n" \
> + "\t.word %c[flag]" "\t# bug_entry::flags\n" \
> + "\t.org 2b+%c[size]\n" \
> ".popsection" \
> - : : "i" (__FILE__), "i" (__LINE__), \
> - "i" (flags), \
> - "i" (sizeof(struct bug_entry))); \
> + : : [file] "i" (__FILE__), [line] "i" (__LINE__), \
> + [flag] "i" (flags), \
> + [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
> } while (0)
>
> #else /* !CONFIG_DEBUG_BUGVERBOSE */
>
> -#define _BUG_FLAGS(ins, flags) \
> +#define _BUG_FLAGS(ins, flags, ...) \
> do { \
> asm volatile("1:\t" ins "\n" \
> ".pushsection __bug_table,\"aw\"\n" \
> "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
> - "\t.word %c0" "\t# bug_entry::flags\n" \
> - "\t.org 2b+%c1\n" \
> + "\t.word %c[flag]" "\t# bug_entry::flags\n" \
> + "\t.org 2b+%c[size]\n" \
> ".popsection" \
> - : : "i" (flags), \
> - "i" (sizeof(struct bug_entry))); \
> + : : [flag] "i" (flags), \
> + [size] "i" (sizeof(struct bug_entry)), ##__VA_ARGS__); \
> } while (0)
>
> #endif /* CONFIG_DEBUG_BUGVERBOSE */
> --- a/arch/x86/include/asm/kasan.h
> +++ b/arch/x86/include/asm/kasan.h
> @@ -2,7 +2,10 @@
> #ifndef _ASM_X86_KASAN_H
> #define _ASM_X86_KASAN_H
>
> +#include <asm/bug.h>
> +
> #include <linux/const.h>
> +#include <linux/sched.h>
> #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
> #define KASAN_SHADOW_SCALE_SHIFT 3
>
> @@ -26,8 +29,22 @@
> #ifndef __ASSEMBLY__
>
> #ifdef CONFIG_KASAN
> +
> void __init kasan_early_init(void);
> void __init kasan_init(void);
> +
> +static __always_inline void
> +kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> +{
> + if (!current->kasan_depth) {
> + unsigned long rdi = addr, rsi = size, rdx = is_write, rcx = ip;
> + _BUG_FLAGS(ASM_UD2, BUGFLAG_KASAN,
> + "D" (rdi), "S" (rsi), "d" (rdx), "c" (rcx));
> + annotate_reachable();
> + }
> +}
> +#define kasan_report kasan_report
> +
> #else
> static inline void kasan_early_init(void) { }
> static inline void kasan_init(void) { }
> --- a/include/asm-generic/bug.h
> +++ b/include/asm-generic/bug.h
> @@ -10,6 +10,7 @@
> #define BUGFLAG_WARNING (1 << 0)
> #define BUGFLAG_ONCE (1 << 1)
> #define BUGFLAG_DONE (1 << 2)
> +#define BUGFLAG_KASAN (1 << 3)
> #define BUGFLAG_TAINT(taint) ((taint) << 8)
> #define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
> #endif
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -83,6 +83,9 @@ size_t kasan_metadata_size(struct kmem_c
> bool kasan_save_enable_multi_shot(void);
> void kasan_restore_multi_shot(bool enabled);
>
> +void __kasan_report(unsigned long addr, size_t size,
> + bool is_write, unsigned long ip);
> +
> #else /* CONFIG_KASAN */
>
> static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
> @@ -153,8 +156,14 @@ static inline void kasan_remove_zero_sha
> static inline void kasan_unpoison_slab(const void *ptr) { }
> static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> +static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
> +
> #endif /* CONFIG_KASAN */
>
> +#ifndef kasan_report
> +#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
> +#endif
> +
> #ifdef CONFIG_KASAN_GENERIC
>
> #define KASAN_SHADOW_INIT 0
> @@ -177,9 +186,6 @@ void kasan_init_tags(void);
>
> void *kasan_reset_tag(const void *addr);
>
> -void kasan_report(unsigned long addr, size_t size,
> - bool is_write, unsigned long ip);
> -
> #else /* CONFIG_KASAN_SW_TAGS */
>
> static inline void kasan_init_tags(void) { }
> --- a/lib/bug.c
> +++ b/lib/bug.c
> @@ -47,6 +47,7 @@
> #include <linux/bug.h>
> #include <linux/sched.h>
> #include <linux/rculist.h>
> +#include <linux/kasan.h>
>
> extern struct bug_entry __start___bug_table[], __stop___bug_table[];
>
> @@ -144,7 +145,7 @@ enum bug_trap_type report_bug(unsigned l
> {
> struct bug_entry *bug;
> const char *file;
> - unsigned line, warning, once, done;
> + unsigned line, warning, once, done, kasan;
>
> if (!is_valid_bugaddr(bugaddr))
> return BUG_TRAP_TYPE_NONE;
> @@ -167,6 +168,7 @@ enum bug_trap_type report_bug(unsigned l
> line = bug->line;
> #endif
> warning = (bug->flags & BUGFLAG_WARNING) != 0;
> + kasan = (bug->flags & BUGFLAG_KASAN) != 0;
> once = (bug->flags & BUGFLAG_ONCE) != 0;
> done = (bug->flags & BUGFLAG_DONE) != 0;
>
> @@ -188,6 +190,11 @@ enum bug_trap_type report_bug(unsigned l
> return BUG_TRAP_TYPE_WARN;
> }
>
> + if (kasan) {
> + __kasan_report(regs->di, regs->si, regs->dx, regs->cx);
> + return BUG_TRAP_TYPE_WARN;
> + }
> +
> printk(KERN_DEFAULT CUT_HERE);
>
> if (file)
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -130,7 +130,7 @@ void check_memory_region(unsigned long a
> void *find_first_bad_addr(void *addr, size_t size);
> const char *get_bug_type(struct kasan_access_info *info);
>
> -void kasan_report(unsigned long addr, size_t size,
> +void __kasan_report(unsigned long addr, size_t size,
> bool is_write, unsigned long ip);
> void kasan_report_invalid_free(void *object, unsigned long ip);
>
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
> end_report(&flags);
> }
>
> -void kasan_report(unsigned long addr, size_t size,
> +void __kasan_report(unsigned long addr, size_t size,
> bool is_write, unsigned long ip)
> {
> struct kasan_access_info info;

2019-03-01 17:32:11

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Fri, Mar 01, 2019 at 04:06:17PM +0100, Dmitry Vyukov wrote:
> It involves asm, hardware exceptions, UD2 instructions. It also seems
> to use arch-dependent code in arch-independent files: there is no RSI
> on other arches, does this compile on non-x86?

> > +#ifndef kasan_report
> > +#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
> > +#endif

Should build; but I've not tried yet.

I'll push the lot out to 0day, that'll yell loudly if I messed that up.

But yes, I'll try some annotation, see what that looks like.

2019-03-06 15:51:54

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 2:13 PM Peter Zijlstra <[email protected]> wrote:
>
> On Fri, Mar 01, 2019 at 04:23:05PM +0100, Peter Zijlstra wrote:
>
> > But yes, I'll try some annotation, see what that looks like.
>
> OK; that took a lot of time.. and a number of objtool bugs fixed but I
> think I have something that I don't hate -- although it is not as solid
> as I'd like it to be.
>
>
> unmodified:
>
> 0000 0000000000000150 <__asan_load1>:
> 0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
> 0007 157: 7f ff ff
> 000a 15a: 48 8b 0c 24 mov (%rsp),%rcx
> 000e 15e: 48 39 c7 cmp %rax,%rdi
> 0011 161: 76 23 jbe 186 <__asan_load1+0x36>
> 0013 163: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
> 001a 16a: fc ff df
> 001d 16d: 48 89 fa mov %rdi,%rdx
> 0020 170: 48 c1 ea 03 shr $0x3,%rdx
> 0024 174: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
> 0028 178: 84 c0 test %al,%al
> 002a 17a: 75 01 jne 17d <__asan_load1+0x2d>
> 002c 17c: c3 retq
> 002d 17d: 89 fa mov %edi,%edx
> 002f 17f: 83 e2 07 and $0x7,%edx
> 0032 182: 38 d0 cmp %dl,%al
> 0034 184: 7f f6 jg 17c <__asan_load1+0x2c>
> 0036 186: 31 d2 xor %edx,%edx
> 0038 188: be 01 00 00 00 mov $0x1,%esi
> 003d 18d: e9 00 00 00 00 jmpq 192 <__asan_load1+0x42>
> 003e 18e: R_X86_64_PLT32 kasan_report-0x4
>
> exception:
>
> 0000 0000000000000150 <__asan_load1>:
> 0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
> 0007 157: 7f ff ff
> 000a 15a: 48 8b 0c 24 mov (%rsp),%rcx
> 000e 15e: 48 39 c7 cmp %rax,%rdi
> 0011 161: 76 23 jbe 186 <__asan_load1+0x36>
> 0013 163: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
> 001a 16a: fc ff df
> 001d 16d: 48 89 fa mov %rdi,%rdx
> 0020 170: 48 c1 ea 03 shr $0x3,%rdx
> 0024 174: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
> 0028 178: 84 c0 test %al,%al
> 002a 17a: 75 01 jne 17d <__asan_load1+0x2d>
> 002c 17c: c3 retq
> 002d 17d: 89 fa mov %edi,%edx
> 002f 17f: 83 e2 07 and $0x7,%edx
> 0032 182: 38 d0 cmp %dl,%al
> 0034 184: 7f f6 jg 17c <__asan_load1+0x2c>
> 0036 186: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax
> 003d 18d: 00 00
> 003b 18b: R_X86_64_32S current_task
> 003f 18f: 8b 80 68 08 00 00 mov 0x868(%rax),%eax
> 0045 195: 85 c0 test %eax,%eax
> 0047 197: 75 e3 jne 17c <__asan_load1+0x2c>
> 0049 199: be 01 00 00 00 mov $0x1,%esi
> 004e 19e: 31 d2 xor %edx,%edx
> 0050 1a0: 0f 0b ud2
> 0052 1a2: c3 retq
>
> annotated:
>
> 0000 0000000000000150 <__asan_load1>:
> 0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
> 0007 157: 7f ff ff
> 000a 15a: 53 push %rbx

/\/\/\/\/\/\

This push is unpleasant on hot fast path. I think we need to move
whole report cold path into a separate noinline function as it is now,
and that function will do the magic with smap. Then this won't prevent
tail calling and won't affect fast-path codegen.

> 000b 15b: 48 8b 4c 24 08 mov 0x8(%rsp),%rcx
> 0010 160: 48 39 c7 cmp %rax,%rdi
> 0013 163: 76 24 jbe 189 <__asan_load1+0x39>
> 0015 165: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
> 001c 16c: fc ff df
> 001f 16f: 48 89 fa mov %rdi,%rdx
> 0022 172: 48 c1 ea 03 shr $0x3,%rdx
> 0026 176: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
> 002a 17a: 84 c0 test %al,%al
> 002c 17c: 75 02 jne 180 <__asan_load1+0x30>
> 002e 17e: 5b pop %rbx
> 002f 17f: c3 retq
> 0030 180: 89 fa mov %edi,%edx
> 0032 182: 83 e2 07 and $0x7,%edx
> 0035 185: 38 d0 cmp %dl,%al
> 0037 187: 7f f5 jg 17e <__asan_load1+0x2e>
> 0039 189: 9c pushfq
> 003a 18a: 5b pop %rbx
> 003b 18b: 90 nop
> 003c 18c: 90 nop
> 003d 18d: 90 nop
> 003e 18e: 31 d2 xor %edx,%edx
> 0040 190: be 01 00 00 00 mov $0x1,%esi
> 0045 195: e8 00 00 00 00 callq 19a <__asan_load1+0x4a>
> 0046 196: R_X86_64_PLT32 __kasan_report-0x4
> 004a 19a: 53 push %rbx
> 004b 19b: 9d popfq
> 004c 19c: 5b pop %rbx
> 004d 19d: c3 retq
>
>
> ---
> --- a/arch/x86/include/asm/kasan.h
> +++ b/arch/x86/include/asm/kasan.h
> @@ -28,6 +28,23 @@
> #ifdef CONFIG_KASAN
> void __init kasan_early_init(void);
> void __init kasan_init(void);
> +
> +#include <asm/smap.h>
> +
> +extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
> +
> +static __always_inline
> +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> +{
> + unsigned long flags;
> +
> + flags = smap_save();

Previously you said that messing with smap here causes boot errors.
Shouldn't we do smap_save iff kasan_report_enabled? Otherwise we just
bail out, so no need to enable/disable smap.

> + __kasan_report(addr, size, is_write, ip);
> + smap_restore(flags);
> +
> +}
> +#define kasan_report kasan_report
> +
> #else
> static inline void kasan_early_init(void) { }
> static inline void kasan_init(void) { }
> --- a/arch/x86/include/asm/smap.h
> +++ b/arch/x86/include/asm/smap.h
> @@ -46,6 +46,8 @@
>
> #ifdef CONFIG_X86_SMAP
>
> +#include <asm/irqflags.h>
> +
> static __always_inline void clac(void)
> {
> /* Note: a barrier is implicit in alternative() */
> @@ -58,6 +60,18 @@ static __always_inline void stac(void)
> alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
> }
>
> +static __always_inline unsigned long smap_save(void)
> +{
> + unsigned long flags = arch_local_save_flags();
> + clac();
> + return flags;
> +}
> +
> +static __always_inline void smap_restore(unsigned long flags)
> +{
> + arch_local_irq_restore(flags);
> +}
> +
> /* These macros can be used in asm() statements */
> #define ASM_CLAC \
> ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
> @@ -69,6 +83,9 @@ static __always_inline void stac(void)
> static inline void clac(void) { }
> static inline void stac(void) { }
>
> +static inline unsigned long smap_save(void) { return 0; }
> +static inline void smap_restore(unsigned long flags) { }
> +
> #define ASM_CLAC
> #define ASM_STAC
>
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -83,6 +83,8 @@ size_t kasan_metadata_size(struct kmem_c
> bool kasan_save_enable_multi_shot(void);
> void kasan_restore_multi_shot(bool enabled);
>
> +void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
> +
> #else /* CONFIG_KASAN */
>
> static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
> @@ -153,8 +155,14 @@ static inline void kasan_remove_zero_sha
> static inline void kasan_unpoison_slab(const void *ptr) { }
> static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> +static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
> +
> #endif /* CONFIG_KASAN */
>
> +#ifndef kasan_report
> +#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
> +#endif
> +
> #ifdef CONFIG_KASAN_GENERIC
>
> #define KASAN_SHADOW_INIT 0
> @@ -177,9 +185,6 @@ void kasan_init_tags(void);
>
> void *kasan_reset_tag(const void *addr);
>
> -void kasan_report(unsigned long addr, size_t size,
> - bool is_write, unsigned long ip);
> -
> #else /* CONFIG_KASAN_SW_TAGS */
>
> static inline void kasan_init_tags(void) { }
> --- a/mm/kasan/generic_report.c
> +++ b/mm/kasan/generic_report.c
> @@ -118,14 +118,14 @@ const char *get_bug_type(struct kasan_ac
> #define DEFINE_ASAN_REPORT_LOAD(size) \
> void __asan_report_load##size##_noabort(unsigned long addr) \
> { \
> - kasan_report(addr, size, false, _RET_IP_); \
> + __kasan_report(addr, size, false, _RET_IP_); \

Unless I am missing something, this seems to make this patch no-op. We
fixed kasan_report for smap, but here we now use __kasan_report which
is not fixed. So this won't work with smap again?..


> } \
> EXPORT_SYMBOL(__asan_report_load##size##_noabort)
>
> #define DEFINE_ASAN_REPORT_STORE(size) \
> void __asan_report_store##size##_noabort(unsigned long addr) \
> { \
> - kasan_report(addr, size, true, _RET_IP_); \
> + __kasan_report(addr, size, true, _RET_IP_); \
> } \
> EXPORT_SYMBOL(__asan_report_store##size##_noabort)
>
> @@ -142,12 +142,12 @@ DEFINE_ASAN_REPORT_STORE(16);
>
> void __asan_report_load_n_noabort(unsigned long addr, size_t size)
> {
> - kasan_report(addr, size, false, _RET_IP_);
> + __kasan_report(addr, size, false, _RET_IP_);
> }
> EXPORT_SYMBOL(__asan_report_load_n_noabort);
>
> void __asan_report_store_n_noabort(unsigned long addr, size_t size)
> {
> - kasan_report(addr, size, true, _RET_IP_);
> + __kasan_report(addr, size, true, _RET_IP_);
> }
> EXPORT_SYMBOL(__asan_report_store_n_noabort);
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -130,8 +130,6 @@ void check_memory_region(unsigned long a
> void *find_first_bad_addr(void *addr, size_t size);
> const char *get_bug_type(struct kasan_access_info *info);
>
> -void kasan_report(unsigned long addr, size_t size,
> - bool is_write, unsigned long ip);
> void kasan_report_invalid_free(void *object, unsigned long ip);
>
> #if defined(CONFIG_KASAN_GENERIC) && \
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
> end_report(&flags);
> }
>
> -void kasan_report(unsigned long addr, size_t size,
> +void __kasan_report(unsigned long addr, size_t size,
> bool is_write, unsigned long ip)
> {
> struct kasan_access_info info;
> --- a/tools/objtool/arch.h
> +++ b/tools/objtool/arch.h
> @@ -43,6 +43,7 @@ enum op_dest_type {
> OP_DEST_REG_INDIRECT,
> OP_DEST_MEM,
> OP_DEST_PUSH,
> + OP_DEST_PUSHF,
> OP_DEST_LEAVE,
> };
>
> @@ -57,6 +58,7 @@ enum op_src_type {
> OP_SRC_REG_INDIRECT,
> OP_SRC_CONST,
> OP_SRC_POP,
> + OP_SRC_POPF,
> OP_SRC_ADD,
> OP_SRC_AND,
> };
> --- a/tools/objtool/arch/x86/decode.c
> +++ b/tools/objtool/arch/x86/decode.c
> @@ -357,13 +357,13 @@ int arch_decode_instruction(struct elf *
> /* pushf */
> *type = INSN_STACK;
> op->src.type = OP_SRC_CONST;
> - op->dest.type = OP_DEST_PUSH;
> + op->dest.type = OP_DEST_PUSHF;
> break;
>
> case 0x9d:
> /* popf */
> *type = INSN_STACK;
> - op->src.type = OP_SRC_POP;
> + op->src.type = OP_SRC_POPF;
> op->dest.type = OP_DEST_MEM;
> break;
>
> --- a/tools/objtool/check.c
> +++ b/tools/objtool/check.c
> @@ -1359,11 +1359,11 @@ static int update_insn_state_regs(struct
> return 0;
>
> /* push */
> - if (op->dest.type == OP_DEST_PUSH)
> + if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
> cfa->offset += 8;
>
> /* pop */
> - if (op->src.type == OP_SRC_POP)
> + if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
> cfa->offset -= 8;
>
> /* add immediate to sp */
> @@ -1620,6 +1620,7 @@ static int update_insn_state(struct inst
> break;
>
> case OP_SRC_POP:
> + case OP_SRC_POPF:
> if (!state->drap && op->dest.type == OP_DEST_REG &&
> op->dest.reg == cfa->base) {
>
> @@ -1684,6 +1685,7 @@ static int update_insn_state(struct inst
> break;
>
> case OP_DEST_PUSH:
> + case OP_DEST_PUSHF:
> state->stack_size += 8;
> if (cfa->base == CFI_SP)
> cfa->offset += 8;
> @@ -1774,7 +1776,7 @@ static int update_insn_state(struct inst
> break;
>
> case OP_DEST_MEM:
> - if (op->src.type != OP_SRC_POP) {
> + if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
> WARN_FUNC("unknown stack-related memory operation",
> insn->sec, insn->offset);
> return -1;
> @@ -2071,6 +2073,16 @@ static int validate_branch(struct objtoo
> if (update_insn_state(insn, &state))
> return 1;
>
> + if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
> + if (state.uaccess)
> + state.uaccess_stack++;
> + }
> +
> + if (insn->stack_op.src.type == OP_SRC_POPF) {
> + if (state.uaccess_stack && !--state.uaccess_stack)
> + state.uaccess = func_uaccess_safe(func);
> + }
> +
> break;
>
> case INSN_STAC:
> @@ -2088,7 +2100,7 @@ static int validate_branch(struct objtoo
> return 1;
> }
>
> - if (func_uaccess_safe(func)) {
> + if (func_uaccess_safe(func) && !state.uaccess_stack) {
> WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
> return 1;
> }
> --- a/tools/objtool/check.h
> +++ b/tools/objtool/check.h
> @@ -32,6 +32,7 @@ struct insn_state {
> unsigned char type;
> bool bp_scratch;
> bool drap, end, uaccess;
> + int uaccess_stack;
> int drap_reg, drap_offset;
> struct cfi_reg vals[CFI_NUM_REGS];
> };

2019-03-06 15:53:00

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 02:39:33PM +0100, Dmitry Vyukov wrote:
> On Wed, Mar 6, 2019 at 2:13 PM Peter Zijlstra <[email protected]> wrote:

> > annotated:
> >
> > 0000 0000000000000150 <__asan_load1>:
> > 0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
> > 0007 157: 7f ff ff
> > 000a 15a: 53 push %rbx
>
> /\/\/\/\/\/\
>
> This push is unpleasant on hot fast path. I think we need to move
> whole report cold path into a separate noinline function as it is now,
> and that function will do the magic with smap. Then this won't prevent
> tail calling and won't affect fast-path codegen.

It's a bit daft of GCC to do that anyway; since it only uses that rbx
thing in the cold path at __asan_load1+0x30.

But yes, that wants fixing or something. Then again; a kernel with KASAN
on is unbearable slow anyway.

> > 000b 15b: 48 8b 4c 24 08 mov 0x8(%rsp),%rcx
> > 0010 160: 48 39 c7 cmp %rax,%rdi
> > 0013 163: 76 24 jbe 189 <__asan_load1+0x39>
> > 0015 165: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
> > 001c 16c: fc ff df
> > 001f 16f: 48 89 fa mov %rdi,%rdx
> > 0022 172: 48 c1 ea 03 shr $0x3,%rdx
> > 0026 176: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
> > 002a 17a: 84 c0 test %al,%al
> > 002c 17c: 75 02 jne 180 <__asan_load1+0x30>
> > 002e 17e: 5b pop %rbx
> > 002f 17f: c3 retq

^^^ hot path, vvv cold path

> > 0030 180: 89 fa mov %edi,%edx
> > 0032 182: 83 e2 07 and $0x7,%edx
> > 0035 185: 38 d0 cmp %dl,%al
> > 0037 187: 7f f5 jg 17e <__asan_load1+0x2e>
> > 0039 189: 9c pushfq
> > 003a 18a: 5b pop %rbx
> > 003b 18b: 90 nop
> > 003c 18c: 90 nop
> > 003d 18d: 90 nop
> > 003e 18e: 31 d2 xor %edx,%edx
> > 0040 190: be 01 00 00 00 mov $0x1,%esi
> > 0045 195: e8 00 00 00 00 callq 19a <__asan_load1+0x4a>
> > 0046 196: R_X86_64_PLT32 __kasan_report-0x4
> > 004a 19a: 53 push %rbx
> > 004b 19b: 9d popfq
> > 004c 19c: 5b pop %rbx
> > 004d 19d: c3 retq

> > +static __always_inline
> > +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> > +{
> > + unsigned long flags;
> > +
> > + flags = smap_save();
>
> Previously you said that messing with smap here causes boot errors.
> Shouldn't we do smap_save iff kasan_report_enabled? Otherwise we just
> bail out, so no need to enable/disable smap.
>
> > + __kasan_report(addr, size, is_write, ip);
> > + smap_restore(flags);
> > +
> > +}

Ah, you think I booted this :-) Still, this is only PUSHF;CLAC, which I
think should actually work really early. It was that #UD thing that
didn't work early, simply because we'd not set up the exception vector
yet when first this happens.

> > --- a/mm/kasan/generic_report.c
> > +++ b/mm/kasan/generic_report.c
> > @@ -118,14 +118,14 @@ const char *get_bug_type(struct kasan_ac
> > #define DEFINE_ASAN_REPORT_LOAD(size) \
> > void __asan_report_load##size##_noabort(unsigned long addr) \
> > { \
> > - kasan_report(addr, size, false, _RET_IP_); \
> > + __kasan_report(addr, size, false, _RET_IP_); \
>
> Unless I am missing something, this seems to make this patch no-op. We
> fixed kasan_report for smap, but here we now use __kasan_report which
> is not fixed. So this won't work with smap again?..

I've not found callers of __asan_report_load* with AC=1 in the kernel
yet. Under what conditions does GCC emit calls to these functions?

2019-03-06 15:53:48

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 03:40:51PM +0100, Dmitry Vyukov wrote:
> On Wed, Mar 6, 2019 at 3:34 PM Peter Zijlstra <[email protected]> wrote:

> > mm/kasan/generic_report.o: warning: objtool: __asan_report_load1_noabort()+0x0: call to __fentry__() with UACCESS enabled

> But if it makes things simpler for the objtool, then I think we can
> disable function tracer for generic_report.c too.

It's not simpler; it is actually a correctness issue. Those functions
must not call into the tracer with AC=1

2019-03-06 15:54:15

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 3:56 PM Peter Zijlstra <[email protected]> wrote:
>
> On Wed, Mar 06, 2019 at 03:40:51PM +0100, Dmitry Vyukov wrote:
> > On Wed, Mar 6, 2019 at 3:34 PM Peter Zijlstra <[email protected]> wrote:
>
> > > mm/kasan/generic_report.o: warning: objtool: __asan_report_load1_noabort()+0x0: call to __fentry__() with UACCESS enabled
>
> > But if it makes things simpler for the objtool, then I think we can
> > disable function tracer for generic_report.c too.
>
> It's not simpler; it is actually a correctness issue. Those functions
> must not call into the tracer with AC=1

You are right!
I assumed they are defined in kasan.c and then call into a report
function that is compiled with -pg.
We can either disable -pg for report.c, or move these callbacks into
kasan.c which already has -pg disabled. I don't have strong preference
either way as long as the option works in all cases.

2019-03-06 15:54:22

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 03:12:37PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 06, 2019 at 03:01:33PM +0100, Dmitry Vyukov wrote:
> > On Wed, Mar 6, 2019 at 2:57 PM Peter Zijlstra <[email protected]> wrote:
>
> > > I've not found callers of __asan_report_load* with AC=1 in the kernel
> > > yet. Under what conditions does GCC emit calls to these functions?
> >
> > CONFIG_KASAN_INLINE=y
> > Then compiler inlines fast path into generated code and only calls
> > into runtime to report errors (also, faster, this should be a default
> > for anything other than tiny ROM controllers).
>
> *sigh*, clearly I've not build enough kernels yet... Lemme go try that.

mm/kasan/generic_report.o: warning: objtool: __asan_report_load1_noabort()+0x0: call to __fentry__() with UACCESS enabled

You want to do:

CFLAGS_REMOVE_generic_report.o = -pg

like generic.o has?

2019-03-06 15:54:31

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 3:34 PM Peter Zijlstra <[email protected]> wrote:
>
> On Wed, Mar 06, 2019 at 03:12:37PM +0100, Peter Zijlstra wrote:
> > On Wed, Mar 06, 2019 at 03:01:33PM +0100, Dmitry Vyukov wrote:
> > > On Wed, Mar 6, 2019 at 2:57 PM Peter Zijlstra <[email protected]> wrote:
> >
> > > > I've not found callers of __asan_report_load* with AC=1 in the kernel
> > > > yet. Under what conditions does GCC emit calls to these functions?
> > >
> > > CONFIG_KASAN_INLINE=y
> > > Then compiler inlines fast path into generated code and only calls
> > > into runtime to report errors (also, faster, this should be a default
> > > for anything other than tiny ROM controllers).
> >
> > *sigh*, clearly I've not build enough kernels yet... Lemme go try that.
>
> mm/kasan/generic_report.o: warning: objtool: __asan_report_load1_noabort()+0x0: call to __fentry__() with UACCESS enabled
>
> You want to do:
>
> CFLAGS_REMOVE_generic_report.o = -pg
>
> like generic.o has?

This should not matter for KASAN itself.
KASAN will call into function tracer, and function tracer will call
into KASAN, but unless function tracer is badly broken and causes a
KASAN report on every invocation, the recursion will end (function
tracer will get to the _report_ function). So we only disabled -pg for
fast paths.
But if it makes things simpler for the objtool, then I think we can
disable function tracer for generic_report.c too.

2019-03-06 17:02:45

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Fri, Mar 01, 2019 at 04:23:05PM +0100, Peter Zijlstra wrote:

> But yes, I'll try some annotation, see what that looks like.

OK; that took a lot of time.. and a number of objtool bugs fixed but I
think I have something that I don't hate -- although it is not as solid
as I'd like it to be.


unmodified:

0000 0000000000000150 <__asan_load1>:
0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
0007 157: 7f ff ff
000a 15a: 48 8b 0c 24 mov (%rsp),%rcx
000e 15e: 48 39 c7 cmp %rax,%rdi
0011 161: 76 23 jbe 186 <__asan_load1+0x36>
0013 163: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
001a 16a: fc ff df
001d 16d: 48 89 fa mov %rdi,%rdx
0020 170: 48 c1 ea 03 shr $0x3,%rdx
0024 174: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
0028 178: 84 c0 test %al,%al
002a 17a: 75 01 jne 17d <__asan_load1+0x2d>
002c 17c: c3 retq
002d 17d: 89 fa mov %edi,%edx
002f 17f: 83 e2 07 and $0x7,%edx
0032 182: 38 d0 cmp %dl,%al
0034 184: 7f f6 jg 17c <__asan_load1+0x2c>
0036 186: 31 d2 xor %edx,%edx
0038 188: be 01 00 00 00 mov $0x1,%esi
003d 18d: e9 00 00 00 00 jmpq 192 <__asan_load1+0x42>
003e 18e: R_X86_64_PLT32 kasan_report-0x4

exception:

0000 0000000000000150 <__asan_load1>:
0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
0007 157: 7f ff ff
000a 15a: 48 8b 0c 24 mov (%rsp),%rcx
000e 15e: 48 39 c7 cmp %rax,%rdi
0011 161: 76 23 jbe 186 <__asan_load1+0x36>
0013 163: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
001a 16a: fc ff df
001d 16d: 48 89 fa mov %rdi,%rdx
0020 170: 48 c1 ea 03 shr $0x3,%rdx
0024 174: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
0028 178: 84 c0 test %al,%al
002a 17a: 75 01 jne 17d <__asan_load1+0x2d>
002c 17c: c3 retq
002d 17d: 89 fa mov %edi,%edx
002f 17f: 83 e2 07 and $0x7,%edx
0032 182: 38 d0 cmp %dl,%al
0034 184: 7f f6 jg 17c <__asan_load1+0x2c>
0036 186: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax
003d 18d: 00 00
003b 18b: R_X86_64_32S current_task
003f 18f: 8b 80 68 08 00 00 mov 0x868(%rax),%eax
0045 195: 85 c0 test %eax,%eax
0047 197: 75 e3 jne 17c <__asan_load1+0x2c>
0049 199: be 01 00 00 00 mov $0x1,%esi
004e 19e: 31 d2 xor %edx,%edx
0050 1a0: 0f 0b ud2
0052 1a2: c3 retq

annotated:

0000 0000000000000150 <__asan_load1>:
0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
0007 157: 7f ff ff
000a 15a: 53 push %rbx
000b 15b: 48 8b 4c 24 08 mov 0x8(%rsp),%rcx
0010 160: 48 39 c7 cmp %rax,%rdi
0013 163: 76 24 jbe 189 <__asan_load1+0x39>
0015 165: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
001c 16c: fc ff df
001f 16f: 48 89 fa mov %rdi,%rdx
0022 172: 48 c1 ea 03 shr $0x3,%rdx
0026 176: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
002a 17a: 84 c0 test %al,%al
002c 17c: 75 02 jne 180 <__asan_load1+0x30>
002e 17e: 5b pop %rbx
002f 17f: c3 retq
0030 180: 89 fa mov %edi,%edx
0032 182: 83 e2 07 and $0x7,%edx
0035 185: 38 d0 cmp %dl,%al
0037 187: 7f f5 jg 17e <__asan_load1+0x2e>
0039 189: 9c pushfq
003a 18a: 5b pop %rbx
003b 18b: 90 nop
003c 18c: 90 nop
003d 18d: 90 nop
003e 18e: 31 d2 xor %edx,%edx
0040 190: be 01 00 00 00 mov $0x1,%esi
0045 195: e8 00 00 00 00 callq 19a <__asan_load1+0x4a>
0046 196: R_X86_64_PLT32 __kasan_report-0x4
004a 19a: 53 push %rbx
004b 19b: 9d popfq
004c 19c: 5b pop %rbx
004d 19d: c3 retq


---
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -28,6 +28,23 @@
#ifdef CONFIG_KASAN
void __init kasan_early_init(void);
void __init kasan_init(void);
+
+#include <asm/smap.h>
+
+extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+
+static __always_inline
+void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+ unsigned long flags;
+
+ flags = smap_save();
+ __kasan_report(addr, size, is_write, ip);
+ smap_restore(flags);
+
+}
+#define kasan_report kasan_report
+
#else
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -46,6 +46,8 @@

#ifdef CONFIG_X86_SMAP

+#include <asm/irqflags.h>
+
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
@@ -58,6 +60,18 @@ static __always_inline void stac(void)
alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}

+static __always_inline unsigned long smap_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ clac();
+ return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+ arch_local_irq_restore(flags);
+}
+
/* These macros can be used in asm() statements */
#define ASM_CLAC \
ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
@@ -69,6 +83,9 @@ static __always_inline void stac(void)
static inline void clac(void) { }
static inline void stac(void) { }

+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
#define ASM_CLAC
#define ASM_STAC

--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,8 @@ size_t kasan_metadata_size(struct kmem_c
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

+void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -153,8 +155,14 @@ static inline void kasan_remove_zero_sha
static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }

+static inline void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { }
+
#endif /* CONFIG_KASAN */

+#ifndef kasan_report
+#define kasan_report(addr, size, is_write, ip) __kasan_report(addr, size, is_write, ip)
+#endif
+
#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SHADOW_INIT 0
@@ -177,9 +185,6 @@ void kasan_init_tags(void);

void *kasan_reset_tag(const void *addr);

-void kasan_report(unsigned long addr, size_t size,
- bool is_write, unsigned long ip);
-
#else /* CONFIG_KASAN_SW_TAGS */

static inline void kasan_init_tags(void) { }
--- a/mm/kasan/generic_report.c
+++ b/mm/kasan/generic_report.c
@@ -118,14 +118,14 @@ const char *get_bug_type(struct kasan_ac
#define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load##size##_noabort(unsigned long addr) \
{ \
- kasan_report(addr, size, false, _RET_IP_); \
+ __kasan_report(addr, size, false, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)

#define DEFINE_ASAN_REPORT_STORE(size) \
void __asan_report_store##size##_noabort(unsigned long addr) \
{ \
- kasan_report(addr, size, true, _RET_IP_); \
+ __kasan_report(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)

@@ -142,12 +142,12 @@ DEFINE_ASAN_REPORT_STORE(16);

void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
- kasan_report(addr, size, false, _RET_IP_);
+ __kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);

void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
- kasan_report(addr, size, true, _RET_IP_);
+ __kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -130,8 +130,6 @@ void check_memory_region(unsigned long a
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);

-void kasan_report(unsigned long addr, size_t size,
- bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);

#if defined(CONFIG_KASAN_GENERIC) && \
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -281,7 +281,7 @@ void kasan_report_invalid_free(void *obj
end_report(&flags);
}

-void kasan_report(unsigned long addr, size_t size,
+void __kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip)
{
struct kasan_access_info info;
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -43,6 +43,7 @@ enum op_dest_type {
OP_DEST_REG_INDIRECT,
OP_DEST_MEM,
OP_DEST_PUSH,
+ OP_DEST_PUSHF,
OP_DEST_LEAVE,
};

@@ -57,6 +58,7 @@ enum op_src_type {
OP_SRC_REG_INDIRECT,
OP_SRC_CONST,
OP_SRC_POP,
+ OP_SRC_POPF,
OP_SRC_ADD,
OP_SRC_AND,
};
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -357,13 +357,13 @@ int arch_decode_instruction(struct elf *
/* pushf */
*type = INSN_STACK;
op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ op->dest.type = OP_DEST_PUSHF;
break;

case 0x9d:
/* popf */
*type = INSN_STACK;
- op->src.type = OP_SRC_POP;
+ op->src.type = OP_SRC_POPF;
op->dest.type = OP_DEST_MEM;
break;

--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1359,11 +1359,11 @@ static int update_insn_state_regs(struct
return 0;

/* push */
- if (op->dest.type == OP_DEST_PUSH)
+ if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
cfa->offset += 8;

/* pop */
- if (op->src.type == OP_SRC_POP)
+ if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
cfa->offset -= 8;

/* add immediate to sp */
@@ -1620,6 +1620,7 @@ static int update_insn_state(struct inst
break;

case OP_SRC_POP:
+ case OP_SRC_POPF:
if (!state->drap && op->dest.type == OP_DEST_REG &&
op->dest.reg == cfa->base) {

@@ -1684,6 +1685,7 @@ static int update_insn_state(struct inst
break;

case OP_DEST_PUSH:
+ case OP_DEST_PUSHF:
state->stack_size += 8;
if (cfa->base == CFI_SP)
cfa->offset += 8;
@@ -1774,7 +1776,7 @@ static int update_insn_state(struct inst
break;

case OP_DEST_MEM:
- if (op->src.type != OP_SRC_POP) {
+ if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
WARN_FUNC("unknown stack-related memory operation",
insn->sec, insn->offset);
return -1;
@@ -2071,6 +2073,16 @@ static int validate_branch(struct objtoo
if (update_insn_state(insn, &state))
return 1;

+ if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
+ if (state.uaccess)
+ state.uaccess_stack++;
+ }
+
+ if (insn->stack_op.src.type == OP_SRC_POPF) {
+ if (state.uaccess_stack && !--state.uaccess_stack)
+ state.uaccess = func_uaccess_safe(func);
+ }
+
break;

case INSN_STAC:
@@ -2088,7 +2100,7 @@ static int validate_branch(struct objtoo
return 1;
}

- if (func_uaccess_safe(func)) {
+ if (func_uaccess_safe(func) && !state.uaccess_stack) {
WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
return 1;
}
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -32,6 +32,7 @@ struct insn_state {
unsigned char type;
bool bp_scratch;
bool drap, end, uaccess;
+ int uaccess_stack;
int drap_reg, drap_offset;
struct cfi_reg vals[CFI_NUM_REGS];
};

2019-03-06 17:37:24

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 2:57 PM Peter Zijlstra <[email protected]> wrote:
>
> On Wed, Mar 06, 2019 at 02:39:33PM +0100, Dmitry Vyukov wrote:
> > On Wed, Mar 6, 2019 at 2:13 PM Peter Zijlstra <[email protected]> wrote:
>
> > > annotated:
> > >
> > > 0000 0000000000000150 <__asan_load1>:
> > > 0000 150: 48 b8 ff ff ff ff ff movabs $0xffff7fffffffffff,%rax
> > > 0007 157: 7f ff ff
> > > 000a 15a: 53 push %rbx
> >
> > /\/\/\/\/\/\
> >
> > This push is unpleasant on hot fast path. I think we need to move
> > whole report cold path into a separate noinline function as it is now,
> > and that function will do the magic with smap. Then this won't prevent
> > tail calling and won't affect fast-path codegen.
>
> It's a bit daft of GCC to do that anyway; since it only uses that rbx
> thing in the cold path at __asan_load1+0x30.
>
> But yes, that wants fixing or something. Then again; a kernel with KASAN
> on is unbearable slow anyway.
>
> > > 000b 15b: 48 8b 4c 24 08 mov 0x8(%rsp),%rcx
> > > 0010 160: 48 39 c7 cmp %rax,%rdi
> > > 0013 163: 76 24 jbe 189 <__asan_load1+0x39>
> > > 0015 165: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax
> > > 001c 16c: fc ff df
> > > 001f 16f: 48 89 fa mov %rdi,%rdx
> > > 0022 172: 48 c1 ea 03 shr $0x3,%rdx
> > > 0026 176: 0f b6 04 02 movzbl (%rdx,%rax,1),%eax
> > > 002a 17a: 84 c0 test %al,%al
> > > 002c 17c: 75 02 jne 180 <__asan_load1+0x30>
> > > 002e 17e: 5b pop %rbx
> > > 002f 17f: c3 retq
>
> ^^^ hot path, vvv cold path
>
> > > 0030 180: 89 fa mov %edi,%edx
> > > 0032 182: 83 e2 07 and $0x7,%edx
> > > 0035 185: 38 d0 cmp %dl,%al
> > > 0037 187: 7f f5 jg 17e <__asan_load1+0x2e>
> > > 0039 189: 9c pushfq
> > > 003a 18a: 5b pop %rbx
> > > 003b 18b: 90 nop
> > > 003c 18c: 90 nop
> > > 003d 18d: 90 nop
> > > 003e 18e: 31 d2 xor %edx,%edx
> > > 0040 190: be 01 00 00 00 mov $0x1,%esi
> > > 0045 195: e8 00 00 00 00 callq 19a <__asan_load1+0x4a>
> > > 0046 196: R_X86_64_PLT32 __kasan_report-0x4
> > > 004a 19a: 53 push %rbx
> > > 004b 19b: 9d popfq
> > > 004c 19c: 5b pop %rbx
> > > 004d 19d: c3 retq
>
> > > +static __always_inline
> > > +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
> > > +{
> > > + unsigned long flags;
> > > +
> > > + flags = smap_save();
> >
> > Previously you said that messing with smap here causes boot errors.
> > Shouldn't we do smap_save iff kasan_report_enabled? Otherwise we just
> > bail out, so no need to enable/disable smap.
> >
> > > + __kasan_report(addr, size, is_write, ip);
> > > + smap_restore(flags);
> > > +
> > > +}
>
> Ah, you think I booted this :-) Still, this is only PUSHF;CLAC, which I
> think should actually work really early. It was that #UD thing that
> didn't work early, simply because we'd not set up the exception vector
> yet when first this happens.
>
> > > --- a/mm/kasan/generic_report.c
> > > +++ b/mm/kasan/generic_report.c
> > > @@ -118,14 +118,14 @@ const char *get_bug_type(struct kasan_ac
> > > #define DEFINE_ASAN_REPORT_LOAD(size) \
> > > void __asan_report_load##size##_noabort(unsigned long addr) \
> > > { \
> > > - kasan_report(addr, size, false, _RET_IP_); \
> > > + __kasan_report(addr, size, false, _RET_IP_); \
> >
> > Unless I am missing something, this seems to make this patch no-op. We
> > fixed kasan_report for smap, but here we now use __kasan_report which
> > is not fixed. So this won't work with smap again?..
>
> I've not found callers of __asan_report_load* with AC=1 in the kernel
> yet. Under what conditions does GCC emit calls to these functions?

CONFIG_KASAN_INLINE=y
Then compiler inlines fast path into generated code and only calls
into runtime to report errors (also, faster, this should be a default
for anything other than tiny ROM controllers).

2019-03-06 17:46:21

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 03:01:33PM +0100, Dmitry Vyukov wrote:
> On Wed, Mar 6, 2019 at 2:57 PM Peter Zijlstra <[email protected]> wrote:

> > I've not found callers of __asan_report_load* with AC=1 in the kernel
> > yet. Under what conditions does GCC emit calls to these functions?
>
> CONFIG_KASAN_INLINE=y
> Then compiler inlines fast path into generated code and only calls
> into runtime to report errors (also, faster, this should be a default
> for anything other than tiny ROM controllers).

*sigh*, clearly I've not build enough kernels yet... Lemme go try that.

2019-03-06 17:47:46

by Dmitry Vyukov

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 3:40 PM Dmitry Vyukov <[email protected]> wrote:
>
> On Wed, Mar 6, 2019 at 3:34 PM Peter Zijlstra <[email protected]> wrote:
> >
> > On Wed, Mar 06, 2019 at 03:12:37PM +0100, Peter Zijlstra wrote:
> > > On Wed, Mar 06, 2019 at 03:01:33PM +0100, Dmitry Vyukov wrote:
> > > > On Wed, Mar 6, 2019 at 2:57 PM Peter Zijlstra <[email protected]> wrote:
> > >
> > > > > I've not found callers of __asan_report_load* with AC=1 in the kernel
> > > > > yet. Under what conditions does GCC emit calls to these functions?
> > > >
> > > > CONFIG_KASAN_INLINE=y
> > > > Then compiler inlines fast path into generated code and only calls
> > > > into runtime to report errors (also, faster, this should be a default
> > > > for anything other than tiny ROM controllers).
> > >
> > > *sigh*, clearly I've not build enough kernels yet... Lemme go try that.
> >
> > mm/kasan/generic_report.o: warning: objtool: __asan_report_load1_noabort()+0x0: call to __fentry__() with UACCESS enabled
> >
> > You want to do:
> >
> > CFLAGS_REMOVE_generic_report.o = -pg
> >
> > like generic.o has?
>
> This should not matter for KASAN itself.
> KASAN will call into function tracer, and function tracer will call
> into KASAN, but unless function tracer is badly broken and causes a
> KASAN report on every invocation, the recursion will end (function
> tracer will get to the _report_ function). So we only disabled -pg for

tracer will _not_ get to the _report_ function

> fast paths.
> But if it makes things simpler for the objtool, then I think we can
> disable function tracer for generic_report.c too.

2019-03-06 18:34:08

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 06:14:51PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 06, 2019 at 02:13:47PM +0100, Peter Zijlstra wrote:
> > +static __always_inline unsigned long smap_save(void)
> > +{
> > + unsigned long flags = arch_local_save_flags();
> > + clac();
> > + return flags;
> > +}
> > +
> > +static __always_inline void smap_restore(unsigned long flags)
> > +{
> > + arch_local_irq_restore(flags);
> > +}
>
> ARGH; the bloody paravirt me harder nonsense makes that pvops calls.
>
> And that (obviously) explodes.. Anybody got any clue why that Xen
> trainwreck wants to paravirt: "PUSHF;POP" and "PUSH;POPF" !?
>
> Also; I suppose I can ALTERNATIVE the whole thing, because Xen will not
> be having SMAP in the first place I suppose.

The below seems to 'work'.

--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -46,8 +46,6 @@

#ifdef CONFIG_X86_SMAP

-#include <asm/irqflags.h>
-
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
@@ -62,14 +60,19 @@ static __always_inline void stac(void)

static __always_inline unsigned long smap_save(void)
{
- unsigned long flags = arch_local_save_flags();
- clac();
+ unsigned long flags;
+
+ asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+ X86_FEATURE_SMAP)
+ : "=rm" (flags) : : "memory", "cc");
+
return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
- arch_local_irq_restore(flags);
+ asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+ : : "g" (flags) : "memory", "cc");
}

/* These macros can be used in asm() statements */

2019-03-06 18:34:46

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 02:13:47PM +0100, Peter Zijlstra wrote:
> +static __always_inline unsigned long smap_save(void)
> +{
> + unsigned long flags = arch_local_save_flags();
> + clac();
> + return flags;
> +}
> +
> +static __always_inline void smap_restore(unsigned long flags)
> +{
> + arch_local_irq_restore(flags);
> +}

ARGH; the bloody paravirt me harder nonsense makes that pvops calls.

And that (obviously) explodes.. Anybody got any clue why that Xen
trainwreck wants to paravirt: "PUSHF;POP" and "PUSH;POPF" !?

Also; I suppose I can ALTERNATIVE the whole thing, because Xen will not
be having SMAP in the first place I suppose.

2019-03-06 20:05:03

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 9:14 AM Peter Zijlstra <[email protected]> wrote:
>
> And that (obviously) explodes.. Anybody got any clue why that Xen
> trainwreck wants to paravirt: "PUSHF;POP" and "PUSH;POPF" !?

Trying to regenerate the IF flag? I dunno.

But yeah, I think you could just force all of it into an explicit asm.
Or just use native_save_fl(). Except now that I look at it, for some
reason that is "extern inline" and we have an out-of-line option. I
can't for the life of me imagine why.

[ Looking at history. Oh, it's because of that same Xen issue and Xen
wanting to use it as a function pointer. Ugh. What a crock. ]

Linus

2019-03-06 20:20:41

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 6, 2019 at 9:37 AM Peter Zijlstra <[email protected]> wrote:
>
> The below seems to 'work'.

Yeah, and makes things cheaper for the non-SMAP case too. Looks sane.

One note:

+ asm volatile (ALTERNATIVE("", "pushf; pop %0; "
__stringify(__ASM_CLAC),

Hmm. Every single use of __ASM_CLAC is together with "__stringity()".

Maybe we could just get rid of that oddity, and just make __ASM_CLAC
be a string to begin with.

At one point it was used bare in the __ASSEMBLY__ version, but that
does not appear to be the case any more since commit 669f8a900198
("x86/smap: Use ALTERNATIVE macro") back in 2015.

Linus

2019-03-07 13:50:38

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 1/8] kasan,x86: Frob kasan_report() in an exception

On Wed, Mar 06, 2019 at 09:59:11AM -0800, Linus Torvalds wrote:
> Maybe we could just get rid of that oddity, and just make __ASM_CLAC
> be a string to begin with.

Seems to work.. I'll add it to the pile.

---
arch/x86/include/asm/smap.h | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)

--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H

-#include <linux/stringify.h>
#include <asm/nops.h>
#include <asm/cpufeatures.h>

/* "Raw" instruction opcodes */
-#define __ASM_CLAC .byte 0x0f,0x01,0xca
-#define __ASM_STAC .byte 0x0f,0x01,0xcb
+#define __ASM_CLAC ".byte 0x0f,0x01,0xca"
+#define __ASM_STAC ".byte 0x0f,0x01,0xcb"

#ifdef __ASSEMBLY__

@@ -28,10 +27,10 @@
#ifdef CONFIG_X86_SMAP

#define ASM_CLAC \
- ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP

#define ASM_STAC \
- ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP

#else /* CONFIG_X86_SMAP */

@@ -49,20 +48,20 @@
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}

static __always_inline unsigned long smap_save(void)
{
unsigned long flags;

- asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+ asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");

@@ -77,9 +76,9 @@ static __always_inline void smap_restore

/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)

#else /* CONFIG_X86_SMAP */