2018-09-06 17:08:58

by Andrey Ryabinin

Subject: [PATCH] arm64: lib: use C string functions with KASAN enabled.

ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
code, so it can potentially miss many bugs.

Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
enabled, so that the generic implementations from lib/string.c are used.

Declare the asm functions as weak instead of removing them, because they
can still be used by the efistub.

Reported-by: Kyeongdon Kim <[email protected]>
Signed-off-by: Andrey Ryabinin <[email protected]>
---
arch/arm64/include/asm/string.h | 14 ++++++++------
arch/arm64/kernel/arm64ksyms.c | 7 +++++--
arch/arm64/lib/memchr.S | 1 +
arch/arm64/lib/memcmp.S | 1 +
arch/arm64/lib/strchr.S | 1 +
arch/arm64/lib/strcmp.S | 1 +
arch/arm64/lib/strlen.S | 1 +
arch/arm64/lib/strncmp.S | 1 +
arch/arm64/lib/strnlen.S | 1 +
arch/arm64/lib/strrchr.S | 1 +
10 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33a5bd5..03a6c256b7ec 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -16,6 +16,7 @@
#ifndef __ASM_STRING_H
#define __ASM_STRING_H

+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c);

@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *, __kernel_size_t);

+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+#endif
+
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);

-#define __HAVE_ARCH_MEMCHR
-extern void *memchr(const void *, int, __kernel_size_t);
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
extern void *__memset(void *, int, __kernel_size_t);

-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, size_t);
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20b70b2..72f63a59b008 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user);
EXPORT_SYMBOL(memstart_addr);

/* string / mem functions */
+#ifndef CONFIG_KASAN
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+#endif
+
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);

/* atomic bitops */
EXPORT_SYMBOL(set_bit);
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 4444c1d25f4b..b790ec0228f6 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -30,6 +30,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
+.weak memchr
ENTRY(memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 2a4e239bd17a..de9975b0afda 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -58,6 +58,7 @@ pos .req x11
limit_wd .req x12
mask .req x13

+.weak memcmp
ENTRY(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index dae0cf5591f9..10799adb8d5f 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -29,6 +29,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
+.weak strchr
ENTRY(strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61760ef..5629b4fa5431 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,6 +60,7 @@ tmp3 .req x9
zeroones .req x10
pos .req x11

+.weak strcmp
ENTRY(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 55ccc8e24c08..f00df4b1b8d9 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -56,6 +56,7 @@ pos .req x12
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

+.weak strlen
ENTRY(strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044761c6..28563ac1c19f 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,6 +64,7 @@ limit_wd .req x13
mask .req x14
endloop .req x15

+.weak strncmp
ENTRY(strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index eae38da6e0bb..bdbfd41164f4 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -59,6 +59,7 @@ limit_wd .req x14
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

+.weak strnlen
ENTRY(strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index f8e2784d5752..31c77f605014 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -29,6 +29,7 @@
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
+.weak strrchr
ENTRY(strrchr)
mov x3, #0
and w1, w1, #0xff
--
2.16.4



2018-09-06 17:08:58

by Andrey Ryabinin

Subject: [PATCH] lib/test_kasan: Add tests for several string/memory API functions

Arch code may have asm implementations of string/memory API functions
instead of using the generic ones from lib/string.c. KASAN doesn't see
memory accesses in asm code and thus can miss many bugs.

E.g. on ARM64 KASAN doesn't see bugs in memchr(), memcmp(), str[r]chr(),
str[n]cmp() and str[n]len(). Add tests for these functions to make sure
we notice the problem on other architectures as well.

Signed-off-by: Andrey Ryabinin <[email protected]>
---
lib/test_kasan.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 70 insertions(+)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ec657105edbf..51b78405bf24 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -579,6 +579,73 @@ static noinline void __init kmem_cache_invalid_free(void)
kmem_cache_destroy(cache);
}

+static noinline void __init kasan_memchr(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("out-of-bounds in memchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memchr(ptr, '1', size + 1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_memcmp(void)
+{
+ char *ptr;
+ size_t size = 24;
+ int arr[9];
+
+ pr_info("out-of-bounds in memcmp\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memset(arr, 0, sizeof(arr));
+ memcmp(ptr, arr, size+1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_strings(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("use-after-free in strchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ kfree(ptr);
+
+ /*
+ * Try to cause only 1 invalid access (less spam in dmesg).
+ * For that we need ptr to point to zeroed byte.
+ * Skip metadata that could be stored in freed object so ptr
+ * will likely point to zeroed byte.
+ */
+ ptr += 16;
+ strchr(ptr, '1');
+
+ pr_info("use-after-free in strrchr\n");
+ strrchr(ptr, '1');
+
+ pr_info("use-after-free in strcmp\n");
+ strcmp(ptr, "2");
+
+ pr_info("use-after-free in strncmp\n");
+ strncmp(ptr, "2", 1);
+
+ pr_info("use-after-free in strlen\n");
+ strlen(ptr);
+
+ pr_info("use-after-free in strnlen\n");
+ strnlen(ptr, 1);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -618,6 +685,9 @@ static int __init kmalloc_tests_init(void)
use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
+ kasan_memchr();
+ kasan_memcmp();
+ kasan_strings();

kasan_restore_multi_shot(multishot);

--
2.16.4


2018-09-07 15:52:15

by Will Deacon

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.

On Thu, Sep 06, 2018 at 08:05:33PM +0300, Andrey Ryabinin wrote:
> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
> code, so it can potentially miss many bugs.
>
> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
> enabled, so that the generic implementations from lib/string.c are used.
>
> Declare the asm functions as weak instead of removing them, because they
> can still be used by the efistub.

I don't understand this bit: efistub uses the __pi_ prefixed versions of the
routines, so why do we need to declare them as weak?

Will

2018-09-07 15:57:53

by Andrey Ryabinin

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.



On 09/07/2018 05:56 PM, Will Deacon wrote:
> On Thu, Sep 06, 2018 at 08:05:33PM +0300, Andrey Ryabinin wrote:
>> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
>> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
>> code, so it can potentially miss many bugs.
>>
>> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
>> enabled, so that the generic implementations from lib/string.c are used.
>>
>> Declare the asm functions as weak instead of removing them, because they
>> can still be used by the efistub.
>
> I don't understand this bit: efistub uses the __pi_ prefixed versions of the
> routines, so why do we need to declare them as weak?

Weak is needed because we can't have two non-weak functions with the same name.

An alternative approach would be to never use e.g. the "strlen" name for the asm implementation of strlen() under CONFIG_KASAN=y.
But that would require adding some special ENDPIPROC_KASAN() macro, since we want __pi_strlen() to point
to the asm strlen().

Using weak seems like a way better solution to me.
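
To make the link-time behaviour concrete, here is a minimal userspace sketch
(purely illustrative, not kernel code; my_strlen is a made-up name standing in
for strlen):

/* asm_version.c - stands in for arch/arm64/lib/strlen.S */
#include <stddef.h>

__attribute__((weak)) size_t my_strlen(const char *s)
{
	size_t n = 0;

	while (s[n])	/* in the real kernel these accesses live in uninstrumented asm */
		n++;
	return n;
}

/* generic_version.c - stands in for lib/string.c built with KASAN */
#include <stddef.h>

size_t my_strlen(const char *s)	/* strong definition, wins at link time */
{
	size_t n = 0;

	while (s[n])	/* here the compiler inserts the KASAN checks */
		n++;
	return n;
}

Linking both objects picks the strong (instrumented) definition. When the
strong one isn't built at all, as with CONFIG_KASAN=n where __HAVE_ARCH_*
keeps the generic code out of lib/string.o, callers simply get the weak asm
one.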

>
> Will
>

2018-09-10 11:35:07

by Mark Rutland

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.

On Fri, Sep 07, 2018 at 06:48:10PM +0300, Andrey Ryabinin wrote:
> On 09/07/2018 05:56 PM, Will Deacon wrote:
> > On Thu, Sep 06, 2018 at 08:05:33PM +0300, Andrey Ryabinin wrote:
> >> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
> >> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
> >> code, so it can potentially miss many bugs.
> >>
> >> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
> >> enabled, so that the generic implementations from lib/string.c are
> >> used.
> >>
> >> Declare the asm functions as weak instead of removing them, because they
> >> can still be used by the efistub.
> >
> > I don't understand this bit: efistub uses the __pi_ prefixed
> > versions of the routines, so why do we need to declare them as weak?
>
> Weak is needed because we can't have two non-weak functions with the same
> name.
>
> An alternative approach would be to never use e.g. the "strlen" name for the
> asm implementation of strlen() under CONFIG_KASAN=y. But that would
> require adding some special ENDPIPROC_KASAN() macro, since we want
> __pi_strlen() to point to the asm strlen().

Somehow, what we have today works with CONFIG_FORTIFY_SOURCE, which
AFAICT would suffer from exactly the same problem with things like
memcpy.

So either we're getting away with that by chance already (and should fix
that regardless of this patch), or this is not actually a problem.

Conditionally aliasing <foo> to __pi_<foo> in a linker script (or header,
for functions which aren't special per the C spec) seems sane to me.

> Using weak seems like a way better solution to me.

I would strongly prefer fixing this without weak, even if we need an
ENDPIPROC_KASAN, and/or wrappers in some header file somewhere, since if
something goes wrong that will fail deterministically at build time
rather than silently falling back to the wrong piece of code.

Thanks,
Mark.

2018-09-10 12:54:36

by Mark Rutland

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.

On Mon, Sep 10, 2018 at 12:33:22PM +0100, Mark Rutland wrote:
> On Fri, Sep 07, 2018 at 06:48:10PM +0300, Andrey Ryabinin wrote:
> > On 09/07/2018 05:56 PM, Will Deacon wrote:
> > > I don't understand this bit: efistub uses the __pi_ prefixed
> > > versions of the routines, so why do we need to declare them as weak?
> >
> > Weak is needed because we can't have two non-weak functions with the same
> > name.
> >
> > An alternative approach would be to never use e.g. the "strlen" name for the
> > asm implementation of strlen() under CONFIG_KASAN=y. But that would
> > require adding some special ENDPIPROC_KASAN() macro, since we want
> > __pi_strlen() to point to the asm strlen().
>
> Somehow, what we have today works with CONFIG_FORTIFY_SOURCE, which
> AFAICT would suffer from exactly the same problem with things like
> memcpy.
>
> So either we're getting away with that by chance already (and should fix
> that regardless of this patch), or this is not actually a problem.

I now see those functions are marked weak in the assembly
implementation; sorry for the noise.

Regardless, I still think it's preferable to avoid weak wherever
possible.

I have a couple of local patches to do that for KASAN, though it's not
clear to me how that should interact with FORTIFY_SOURCE.

Thanks,
Mark.

2018-09-10 13:07:53

by Will Deacon

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.

On Mon, Sep 10, 2018 at 01:53:03PM +0100, Mark Rutland wrote:
> On Mon, Sep 10, 2018 at 12:33:22PM +0100, Mark Rutland wrote:
> > On Fri, Sep 07, 2018 at 06:48:10PM +0300, Andrey Ryabinin wrote:
> > > On 09/07/2018 05:56 PM, Will Deacon wrote:
> > > > I don't understand this bit: efistub uses the __pi_ prefixed
> > > > versions of the routines, so why do we need to declare them as weak?
> > >
> > > Weak is needed because we can't have two non-weak functions with the same
> > > name.
> > >
> > > An alternative approach would be to never use e.g. the "strlen" name for the
> > > asm implementation of strlen() under CONFIG_KASAN=y. But that would
> > > require adding some special ENDPIPROC_KASAN() macro, since we want
> > > __pi_strlen() to point to the asm strlen().
> >
> > Somehow, what we have today works with CONFIG_FORTIFY_SOURCE, which
> > AFAICT would suffer from exactly the same problem with things like
> > memcpy.
> >
> > So either we're getting away with that by chance already (and should fix
> > that regardless of this patch), or this is not actually a problem.
>
> I now see those functions are marked weak in the assembly
> implementation; sorry for the noise.
>
> Regardless, I still think it's preferable to avoid weak wherever
> possible.

I was thinking along the same lines, but having played around with the code,
I agree with Andrey that this appears to be the cleanest solution.

Andrey -- could you respin using WEAK instead of .weak, removing any
redundant uses of ENTRY in the process? We might also need to throw an
ALIGN directive into the WEAK definition.

Will

2018-09-11 13:02:28

by Andrey Ryabinin

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.



On 09/10/2018 04:06 PM, Will Deacon wrote:
> On Mon, Sep 10, 2018 at 01:53:03PM +0100, Mark Rutland wrote:
>> On Mon, Sep 10, 2018 at 12:33:22PM +0100, Mark Rutland wrote:
>>> On Fri, Sep 07, 2018 at 06:48:10PM +0300, Andrey Ryabinin wrote:
>>>> On 09/07/2018 05:56 PM, Will Deacon wrote:
>>>>> I don't understand this bit: efistub uses the __pi_ prefixed
>>>>> versions of the routines, so why do we need to declare them as weak?
>>>>
>>>> Weak is needed because we can't have two non-weak functions with the same
>>>> name.
>>>>
>>>> An alternative approach would be to never use e.g. the "strlen" name for the
>>>> asm implementation of strlen() under CONFIG_KASAN=y. But that would
>>>> require adding some special ENDPIPROC_KASAN() macro, since we want
>>>> __pi_strlen() to point to the asm strlen().
>>>
>>> Somehow, what we have today works with CONFIG_FORTIFY_SOURCE, which
>>> AFAICT would suffer from exactly the same problem with things like
>>> memcpy.
>>>

FORTIFY_SOURCE seems to use "extern inline" to redefine functions.
I obviously cannot make the whole of lib/string.c 'extern inline'.
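
The pattern being referred to is roughly the following (a simplified sketch
reconstructed from memory, not the actual include/linux/string.h code;
__real_strlen and the exact bounds check are illustrative):

#include <stddef.h>

/* provided out of line by lib/string.c in the real kernel */
extern void fortify_panic(const char *name) __attribute__((noreturn));

/* the out-of-line symbol stays the arch/asm implementation */
extern size_t __real_strlen(const char *) __asm__("strlen");

/*
 * Every call site sees this always-inlined checking wrapper instead;
 * "extern inline" plus gnu_inline means no second out-of-line strlen
 * is ever emitted, so it doesn't clash with the asm symbol.
 */
extern inline __attribute__((always_inline, gnu_inline))
size_t strlen(const char *p)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t ret = __real_strlen(p);

	if (p_size != (size_t)-1 && ret >= p_size)
		fortify_panic(__func__);
	return ret;
}

That trick only wraps the calls, though; for KASAN the instrumented accesses
have to be in the function body itself, which is why the generic lib/string.c
implementations have to be used instead.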


>>> So either we're getting away with that by chance already (and should fix
>>> that regardless of this patch), or this is not actually a problem.
>>
>> I now see those functions are marked weak in the assembly
>> implementation; sorry for the noise.
>>
>> Regardless, I still think it's preferable to avoid weak wherever
>> possible.
>
> I was thinking along the same lines, but having played around with the code,
> I agree with Andrey that this appears to be the cleanest solution.
>
> Andrey -- could you respin using WEAK instead of .weak, removing any
> redundant uses of ENTRY in the process? We might also need to throw an
> ALIGN directive into the WEAK definition.
>

Actually I came up with something that looks decent, without using weak symbols, see below.
"#ifndef CONFIG_KASAN" could be moved to the header. In that case ALIAS should probably be renamed to
something like NOKASAN_ALIAS().

---
arch/arm64/include/asm/assembler.h | 7 +++++++
arch/arm64/include/asm/string.h | 14 ++++++++------
arch/arm64/kernel/arm64ksyms.c | 7 +++++--
arch/arm64/lib/memchr.S | 8 ++++++--
arch/arm64/lib/memcmp.S | 8 ++++++--
arch/arm64/lib/strchr.S | 8 ++++++--
arch/arm64/lib/strcmp.S | 8 ++++++--
arch/arm64/lib/strlen.S | 8 ++++++--
arch/arm64/lib/strncmp.S | 8 ++++++--
arch/arm64/lib/strnlen.S | 8 ++++++--
arch/arm64/lib/strrchr.S | 8 ++++++--
11 files changed, 68 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 0bcc98dbba56..9779c6e03337 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -467,6 +467,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.size __pi_##x, . - x; \
ENDPROC(x)

+#define ALIAS(x, y) \
+ .globl y; \
+ .type y, %function; \
+ .set y, x; \
+ .size y, . - x; \
+ ENDPROC(y)
+
/*
* Annotate a function as being unsuitable for kprobes.
*/
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33a5bd5..8ddc7bd1f03e 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -16,6 +16,7 @@
#ifndef __ASM_STRING_H
#define __ASM_STRING_H

+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c);

@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *, __kernel_size_t);

+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+#endif
+
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);

-#define __HAVE_ARCH_MEMCHR
-extern void *memchr(const void *, int, __kernel_size_t);
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
extern void *__memset(void *, int, __kernel_size_t);

-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, size_t);
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20b70b2..d72a32ea5335 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -43,6 +43,7 @@ EXPORT_SYMBOL(__arch_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);

+#ifndef CONFIG_KASAN
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
@@ -50,14 +51,16 @@ EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
+#endif
+
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);

/* atomic bitops */
EXPORT_SYMBOL(set_bit);
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 4444c1d25f4b..a2f711baaaec 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -30,7 +30,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(memchr)
+ENTRY(__pi_memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
b.mi 2f
@@ -41,4 +41,8 @@ ENTRY(memchr)
ret
2: mov x0, #0
ret
-ENDPIPROC(memchr)
+ENDPROC(__pi_memchr)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_memchr, memchr)
+#endif
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 2a4e239bd17a..d2d6b76d1a44 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -58,7 +58,7 @@ pos .req x11
limit_wd .req x12
mask .req x13

-ENTRY(memcmp)
+ENTRY(__pi_memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
@@ -255,4 +255,8 @@ CPU_LE( rev data2, data2 )
.Lret0:
mov result, #0
ret
-ENDPIPROC(memcmp)
+ENDPROC(__pi_memcmp)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_memcmp, memcmp)
+#endif
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index dae0cf5591f9..5bcfcf66042e 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(strchr)
+ENTRY(__pi_strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
cmp w2, w1
@@ -39,4 +39,8 @@ ENTRY(strchr)
cmp w2, w1
csel x0, x0, xzr, eq
ret
-ENDPROC(strchr)
+ENDPROC(__pi_strchr)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strchr, strchr)
+#endif
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61760ef..e0dd23f36be9 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,7 +60,7 @@ tmp3 .req x9
zeroones .req x10
pos .req x11

-ENTRY(strcmp)
+ENTRY(__pi_strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
@@ -231,4 +231,8 @@ CPU_BE( orr syndrome, diff, has_nul )
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
-ENDPIPROC(strcmp)
+ENDPROC(__pi_strcmp)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strcmp, strcmp)
+#endif
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 55ccc8e24c08..f73e6a6c2fc0 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -56,7 +56,7 @@ pos .req x12
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

-ENTRY(strlen)
+ENTRY(__pi_strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
@@ -123,4 +123,8 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
csinv data1, data1, xzr, le
csel data2, data2, data2a, le
b .Lrealigned
-ENDPIPROC(strlen)
+ENDPROC(__pi_strlen)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strlen, strlen)
+#endif
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044761c6..640dc77d4a2c 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,7 +64,7 @@ limit_wd .req x13
mask .req x14
endloop .req x15

-ENTRY(strncmp)
+ENTRY(__pi_strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
mov zeroones, #REP8_01
@@ -307,4 +307,8 @@ CPU_BE( orr syndrome, diff, has_nul )
.Lret0:
mov result, #0
ret
-ENDPIPROC(strncmp)
+ENDPROC(__pi_strncmp)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strncmp, strncmp)
+#endif
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index eae38da6e0bb..c9749b807f84 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -59,7 +59,7 @@ limit_wd .req x14
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

-ENTRY(strnlen)
+ENTRY(__pi_strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
@@ -168,4 +168,8 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
.Lhit_limit:
mov len, limit
ret
-ENDPIPROC(strnlen)
+ENDPROC(__pi_strnlen)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strnlen, strnlen)
+#endif
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index f8e2784d5752..27bb369de8d9 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
-ENTRY(strrchr)
+ENTRY(__pi_strrchr)
mov x3, #0
and w1, w1, #0xff
1: ldrb w2, [x0], #1
@@ -40,4 +40,8 @@ ENTRY(strrchr)
b 1b
2: mov x0, x3
ret
-ENDPIPROC(strrchr)
+ENDPROC(__pi_strrchr)
+
+#ifndef CONFIG_KASAN
+ALIAS(__pi_strrchr, strrchr)
+#endif
--
2.16.4

2018-09-14 15:29:30

by Will Deacon

Subject: Re: [PATCH] arm64: lib: use C string functions with KASAN enabled.

On Tue, Sep 11, 2018 at 04:01:28PM +0300, Andrey Ryabinin wrote:
>
>
> On 09/10/2018 04:06 PM, Will Deacon wrote:
> > On Mon, Sep 10, 2018 at 01:53:03PM +0100, Mark Rutland wrote:
> >> On Mon, Sep 10, 2018 at 12:33:22PM +0100, Mark Rutland wrote:
> >>> On Fri, Sep 07, 2018 at 06:48:10PM +0300, Andrey Ryabinin wrote:
> >>>> On 09/07/2018 05:56 PM, Will Deacon wrote:
> >>>>> I don't understand this bit: efistub uses the __pi_ prefixed
> >>>>> versions of the routines, so why do we need to declare them as weak?
> >>>>
> >>>> Weak is needed because we can't have two non-weak functions with the same
> >>>> name.
> >>>>
> >>>> An alternative approach would be to never use e.g. the "strlen" name for the
> >>>> asm implementation of strlen() under CONFIG_KASAN=y. But that would
> >>>> require adding some special ENDPIPROC_KASAN() macro, since we want
> >>>> __pi_strlen() to point to the asm strlen().
> >>>
> >>> Somehow, what we have today works with CONFIG_FORTIFY_SOURCE, which
> >>> AFAICT would suffer from exactly the same problem with things like
> >>> memcpy.
> >>>
>
> FORTIFY_SOURCE seems to use "extern inline" to redefine functions.
> I obviously cannot make the whole of lib/string.c 'extern inline'.
>
>
> >>> So either we're getting away with that by chance already (and should fix
> >>> that regardless of this patch), or this is not actually a problem.
> >>
> >> I now see those functions are marked weak in the assembly
> >> implementation; sorry for the noise.
> >>
> >> Regardless, I still think it's preferable to avoid weak wherever
> >> possible.
> >
> > I was thinking along the same lines, but having played around with the code,
> > I agree with Andrey that this appears to be the cleanest solution.
> >
> > Andrey -- could you respin using WEAK instead of .weak, removing any
> > redundant uses of ENTRY in the process? We might also need to throw an
> > ALIGN directive into the WEAK definition.
> >
>
> Actually I came up with something that looks decent, without using weak symbols, see below.
> "#ifndef CONFIG_KASAN" could be moved to the header. In that case ALIAS should probably be renamed to
> something like NOKASAN_ALIAS().

Hmm, to be honest, I'd kinda got used to the version using weak symbols
and I reckon it'd be cleaner still if you respin it using WEAK.

Will

2018-09-20 13:56:52

by Andrey Ryabinin

Subject: [PATCH v2 1/3] linkage.h: Align weak symbols.

WEAK() is supposed to be used instead of ENTRY() to define weak
symbols, but unlike ENTRY() it doesn't have an ALIGN directive.
There seems to be no actual reason not to have one, so let's add
ALIGN to WEAK() too.

Signed-off-by: Andrey Ryabinin <[email protected]>
---
include/linux/linkage.h | 1 +
1 file changed, 1 insertion(+)

diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d7618c41f74c..7c47b1a471d4 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -90,6 +90,7 @@
#ifndef WEAK
#define WEAK(name) \
.weak name ASM_NL \
+ ALIGN ASM_NL \
name:
#endif

--
2.16.4


2018-09-20 13:57:02

by Andrey Ryabinin

Subject: [PATCH v2 2/3] arm64: lib: use C string functions with KASAN enabled.

ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
code, so it can potentially miss many bugs.

Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
enabled, so that the generic implementations from lib/string.c are used.

We can't just remove the asm functions because the efistub uses them,
and we can't have two non-weak functions with the same name either, so
declare the asm functions as weak.

Reported-by: Kyeongdon Kim <[email protected]>
Signed-off-by: Andrey Ryabinin <[email protected]>
---
Changes since v1:
- Use WEAK() instead of .weak

arch/arm64/include/asm/string.h | 14 ++++++++------
arch/arm64/kernel/arm64ksyms.c | 7 +++++--
arch/arm64/lib/memchr.S | 2 +-
arch/arm64/lib/memcmp.S | 2 +-
arch/arm64/lib/strchr.S | 2 +-
arch/arm64/lib/strcmp.S | 2 +-
arch/arm64/lib/strlen.S | 2 +-
arch/arm64/lib/strncmp.S | 2 +-
arch/arm64/lib/strnlen.S | 2 +-
arch/arm64/lib/strrchr.S | 2 +-
10 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33a5bd5..03a6c256b7ec 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -16,6 +16,7 @@
#ifndef __ASM_STRING_H
#define __ASM_STRING_H

+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c);

@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *, __kernel_size_t);

+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+#endif
+
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);

-#define __HAVE_ARCH_MEMCHR
-extern void *memchr(const void *, int, __kernel_size_t);
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
extern void *__memset(void *, int, __kernel_size_t);

-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, size_t);
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20b70b2..72f63a59b008 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user);
EXPORT_SYMBOL(memstart_addr);

/* string / mem functions */
+#ifndef CONFIG_KASAN
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+#endif
+
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);

/* atomic bitops */
EXPORT_SYMBOL(set_bit);
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 4444c1d25f4b..0f164a4baf52 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -30,7 +30,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(memchr)
+WEAK(memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
b.mi 2f
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 2a4e239bd17a..fb295f52e9f8 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -58,7 +58,7 @@ pos .req x11
limit_wd .req x12
mask .req x13

-ENTRY(memcmp)
+WEAK(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index dae0cf5591f9..7c83091d1bcd 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(strchr)
+WEAK(strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
cmp w2, w1
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61760ef..7d5d15398bfb 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,7 +60,7 @@ tmp3 .req x9
zeroones .req x10
pos .req x11

-ENTRY(strcmp)
+WEAK(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 55ccc8e24c08..8e0b14205dcb 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -56,7 +56,7 @@ pos .req x12
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

-ENTRY(strlen)
+WEAK(strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044761c6..66bd145935d9 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,7 +64,7 @@ limit_wd .req x13
mask .req x14
endloop .req x15

-ENTRY(strncmp)
+WEAK(strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
mov zeroones, #REP8_01
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index eae38da6e0bb..355be04441fe 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -59,7 +59,7 @@ limit_wd .req x14
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

-ENTRY(strnlen)
+WEAK(strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index f8e2784d5752..ea84924d5990 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
-ENTRY(strrchr)
+WEAK(strrchr)
mov x3, #0
and w1, w1, #0xff
1: ldrb w2, [x0], #1
--
2.16.4


2018-09-20 13:57:12

by Andrey Ryabinin

Subject: [PATCH v2 3/3] lib/test_kasan: Add tests for several string/memory API functions

Arch code may have asm implementations of string/memory API functions
instead of using the generic ones from lib/string.c. KASAN doesn't see
memory accesses in asm code and thus can miss many bugs.

E.g. on ARM64 KASAN doesn't see bugs in memchr(), memcmp(), str[r]chr(),
str[n]cmp() and str[n]len(). Add tests for these functions to make sure
we notice the problem on other architectures as well.

Signed-off-by: Andrey Ryabinin <[email protected]>
---

No changes since v1.

lib/test_kasan.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 70 insertions(+)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ec657105edbf..51b78405bf24 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -579,6 +579,73 @@ static noinline void __init kmem_cache_invalid_free(void)
kmem_cache_destroy(cache);
}

+static noinline void __init kasan_memchr(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("out-of-bounds in memchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memchr(ptr, '1', size + 1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_memcmp(void)
+{
+ char *ptr;
+ size_t size = 24;
+ int arr[9];
+
+ pr_info("out-of-bounds in memcmp\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memset(arr, 0, sizeof(arr));
+ memcmp(ptr, arr, size+1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_strings(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("use-after-free in strchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ kfree(ptr);
+
+ /*
+ * Try to cause only 1 invalid access (less spam in dmesg).
+ * For that we need ptr to point to zeroed byte.
+ * Skip metadata that could be stored in freed object so ptr
+ * will likely point to zeroed byte.
+ */
+ ptr += 16;
+ strchr(ptr, '1');
+
+ pr_info("use-after-free in strrchr\n");
+ strrchr(ptr, '1');
+
+ pr_info("use-after-free in strcmp\n");
+ strcmp(ptr, "2");
+
+ pr_info("use-after-free in strncmp\n");
+ strncmp(ptr, "2", 1);
+
+ pr_info("use-after-free in strlen\n");
+ strlen(ptr);
+
+ pr_info("use-after-free in strnlen\n");
+ strnlen(ptr, 1);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -618,6 +685,9 @@ static int __init kmalloc_tests_init(void)
use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
+ kasan_memchr();
+ kasan_memcmp();
+ kasan_strings();

kasan_restore_multi_shot(multishot);

--
2.16.4


2018-10-29 10:29:53

by Will Deacon

Subject: Re: [PATCH v2 1/3] linkage.h: Align weak symbols.

On Thu, Sep 20, 2018 at 04:56:29PM +0300, Andrey Ryabinin wrote:
> WEAK() is supposed to be used instead of ENTRY() to define weak
> symbols, but unlike ENTRY() it doesn't have an ALIGN directive.
> There seems to be no actual reason not to have one, so let's add
> ALIGN to WEAK() too.
>
> Signed-off-by: Andrey Ryabinin <[email protected]>
> ---
> include/linux/linkage.h | 1 +
> 1 file changed, 1 insertion(+)

Looks sensible to me:

Acked-by: Will Deacon <[email protected]>

Will

> diff --git a/include/linux/linkage.h b/include/linux/linkage.h
> index d7618c41f74c..7c47b1a471d4 100644
> --- a/include/linux/linkage.h
> +++ b/include/linux/linkage.h
> @@ -90,6 +90,7 @@
> #ifndef WEAK
> #define WEAK(name) \
> .weak name ASM_NL \
> + ALIGN ASM_NL \
> name:
> #endif
>
> --
> 2.16.4
>

2018-10-29 10:31:12

by Will Deacon

Subject: Re: [PATCH v2 2/3] arm64: lib: use C string functions with KASAN enabled.

On Thu, Sep 20, 2018 at 04:56:30PM +0300, Andrey Ryabinin wrote:
> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
> code, so it can potentially miss many bugs.
>
> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
> enabled, so that the generic implementations from lib/string.c are used.
>
> We can't just remove the asm functions because the efistub uses them,
> and we can't have two non-weak functions with the same name either, so
> declare the asm functions as weak.
>
> Reported-by: Kyeongdon Kim <[email protected]>
> Signed-off-by: Andrey Ryabinin <[email protected]>
> ---
> Changes since v1:
> - Use WEAK() instead of .weak
>
> arch/arm64/include/asm/string.h | 14 ++++++++------
> arch/arm64/kernel/arm64ksyms.c | 7 +++++--
> arch/arm64/lib/memchr.S | 2 +-
> arch/arm64/lib/memcmp.S | 2 +-
> arch/arm64/lib/strchr.S | 2 +-
> arch/arm64/lib/strcmp.S | 2 +-
> arch/arm64/lib/strlen.S | 2 +-
> arch/arm64/lib/strncmp.S | 2 +-
> arch/arm64/lib/strnlen.S | 2 +-
> arch/arm64/lib/strrchr.S | 2 +-
> 10 files changed, 21 insertions(+), 16 deletions(-)

Acked-by: Will Deacon <[email protected]>

Please post these again after the merge window and we'll figure out how to
get them queued.

Will

2018-10-29 11:17:29

by Andrey Ryabinin

Subject: Re: [PATCH v2 2/3] arm64: lib: use C string functions with KASAN enabled.



On 10/29/2018 01:29 PM, Will Deacon wrote:
> On Thu, Sep 20, 2018 at 04:56:30PM +0300, Andrey Ryabinin wrote:
>> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
>> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
>> code, so it can potentially miss many bugs.
>>
>> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
>> enabled, so that the generic implementations from lib/string.c are used.
>>
>> We can't just remove the asm functions because the efistub uses them,
>> and we can't have two non-weak functions with the same name either, so
>> declare the asm functions as weak.
>>
>> Reported-by: Kyeongdon Kim <[email protected]>
>> Signed-off-by: Andrey Ryabinin <[email protected]>
>> ---
>> Changes since v1:
>> - Use WEAK() instead of .weak
>>
>> arch/arm64/include/asm/string.h | 14 ++++++++------
>> arch/arm64/kernel/arm64ksyms.c | 7 +++++--
>> arch/arm64/lib/memchr.S | 2 +-
>> arch/arm64/lib/memcmp.S | 2 +-
>> arch/arm64/lib/strchr.S | 2 +-
>> arch/arm64/lib/strcmp.S | 2 +-
>> arch/arm64/lib/strlen.S | 2 +-
>> arch/arm64/lib/strncmp.S | 2 +-
>> arch/arm64/lib/strnlen.S | 2 +-
>> arch/arm64/lib/strrchr.S | 2 +-
>> 10 files changed, 21 insertions(+), 16 deletions(-)
>
> Acked-by: Will Deacon <[email protected]>
>
> Please post these again after the merge window and we'll figure out how to
> get them queued.
>


Andrew sent these patches to Linus a couple of days ago, so they are in the tree already.

Something went wrong with the mail notifications, though. I didn't even realize that they
were in the -mm tree, because I didn't receive the usual 'the patch has been added to the -mm tree' email.
But I did receive the email that was sent to Linus.

Also, neither you nor Catalin were in the Cc tags of patches 2 and 3, and in the first patch the Cc tags were
corrupted:

From: Andrey Ryabinin <[email protected]>
Subject: include/linux/linkage.h: align weak symbols

Since WEAK() supposed to be used instead of ENTRY() to define weak
symbols, but unlike ENTRY() it doesn't have ALIGN directive. It seems
there is no actual reason to not have, so let's add ALIGN to WEAK() too.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrey Ryabinin <[email protected]>
Will Deacon <[email protected]>, Catalin Marinas <[email protected]>
Cc: Kyeongdon Kim <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Mark Rutland <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>

2018-10-29 11:30:51

by Will Deacon

Subject: Re: [PATCH v2 2/3] arm64: lib: use C string functions with KASAN enabled.

Hi Andrey, Andrew,

On Mon, Oct 29, 2018 at 11:16:15AM +0000, Andrey Ryabinin wrote:
> On 10/29/2018 01:29 PM, Will Deacon wrote:
> > On Thu, Sep 20, 2018 at 04:56:30PM +0300, Andrey Ryabinin wrote:
> >> ARM64 has asm implementations of memchr(), memcmp(), str[r]chr(),
> >> str[n]cmp() and str[n]len(). KASAN doesn't see memory accesses in asm
> >> code, so it can potentially miss many bugs.
> >>
> >> Ifdef out the __HAVE_ARCH_* defines of these functions when KASAN is
> >> enabled, so that the generic implementations from lib/string.c are used.
> >>
> >> We can't just remove the asm functions because the efistub uses them,
> >> and we can't have two non-weak functions with the same name either, so
> >> declare the asm functions as weak.
> >>
> >> Reported-by: Kyeongdon Kim <[email protected]>
> >> Signed-off-by: Andrey Ryabinin <[email protected]>
> >> ---
> >> Changes since v1:
> >> - Use WEAK() instead of .weak
> >>
> >> arch/arm64/include/asm/string.h | 14 ++++++++------
> >> arch/arm64/kernel/arm64ksyms.c | 7 +++++--
> >> arch/arm64/lib/memchr.S | 2 +-
> >> arch/arm64/lib/memcmp.S | 2 +-
> >> arch/arm64/lib/strchr.S | 2 +-
> >> arch/arm64/lib/strcmp.S | 2 +-
> >> arch/arm64/lib/strlen.S | 2 +-
> >> arch/arm64/lib/strncmp.S | 2 +-
> >> arch/arm64/lib/strnlen.S | 2 +-
> >> arch/arm64/lib/strrchr.S | 2 +-
> >> 10 files changed, 21 insertions(+), 16 deletions(-)
> >
> > Acked-by: Will Deacon <[email protected]>
> >
> > Please post these again after the merge window and we'll figure out how to
> > get them queued.
>
> Andrew sent these patches to Linus a couple of days ago, so they are in the
> tree already.

Oh, good thing I was happy with them in the end, then!

> Something went wrong with the mail notifications, though. I didn't even realize
> that they were in the -mm tree, because I didn't receive the usual 'the patch
> has been added to the -mm tree' email. But I did receive the email that was sent
> to Linus.

Yeah, strange. I usually see the notifications from Andrew.

> Also, neither you nor Catalin were in the Cc tags of patches 2 and 3, and in
> the first patch the Cc tags were corrupted:

:/

Andrew -- have we broken your scripts somehow, or is this just a one-off
for these patches?

Thanks,

Will

> From: Andrey Ryabinin <[email protected]>
> Subject: include/linux/linkage.h: align weak symbols
>
> Since WEAK() supposed to be used instead of ENTRY() to define weak
> symbols, but unlike ENTRY() it doesn't have ALIGN directive. It seems
> there is no actual reason to not have, so let's add ALIGN to WEAK() too.
>
> Link: http://lkml.kernel.org/r/[email protected]
> Signed-off-by: Andrey Ryabinin <[email protected]>
> Will Deacon <[email protected]>, Catalin Marinas <[email protected]>
> Cc: Kyeongdon Kim <[email protected]>
> Cc: Ard Biesheuvel <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Mark Rutland <[email protected]>
> Signed-off-by: Andrew Morton <[email protected]>