2021-02-01 19:50:58

by Andrey Konovalov

Subject: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

This change provides a simpler implementation of mte_get_mem_tag(),
mte_get_random_tag(), and mte_set_mem_tag_range().

Simplifications include removing system_supports_mte() checks, as these
functions are only called from the KASAN runtime, which has already
checked system_supports_mte(). Besides that, size and address alignment
checks are removed from mte_set_mem_tag_range(), as KASAN now performs them.

This change also moves these functions into the asm/mte-kasan.h header
and implements mte_set_mem_tag_range() via inline assembly to avoid
unnecessary function calls.
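
For context, the KASAN runtime reaches these helpers only through thin
arch_*() wrappers in asm/memory.h, roughly of this shape (an
illustrative sketch, not part of this patch):

	#define arch_get_random_tag()	mte_get_random_tag()
	#define arch_get_mem_tag(addr)	mte_get_mem_tag(addr)
	#define arch_set_mem_tag_range(addr, size, tag)	\
		mte_set_mem_tag_range((addr), (size), (tag))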

Co-developed-by: Vincenzo Frascino <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
arch/arm64/include/asm/cache.h     |  1 -
arch/arm64/include/asm/kasan.h     |  1 +
arch/arm64/include/asm/mte-def.h   |  2 +
arch/arm64/include/asm/mte-kasan.h | 64 ++++++++++++++++++++++++++----
arch/arm64/include/asm/mte.h       |  2 -
arch/arm64/kernel/mte.c            | 46 ---------------------
arch/arm64/lib/mte.S               | 16 --------
7 files changed, 60 insertions(+), 72 deletions(-)

diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 77cbbe3625f2..a074459f8f2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -6,7 +6,6 @@
#define __ASM_CACHE_H

#include <asm/cputype.h>
-#include <asm/mte-kasan.h>

#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 0aaf9044cd6a..12d5f47f7dbe 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -6,6 +6,7 @@

#include <linux/linkage.h>
#include <asm/memory.h>
+#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h>

#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index 2d73a1612f09..cf241b0f0a42 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -11,4 +11,6 @@
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)

+#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
#endif /* __ASM_MTE_DEF_H */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 8ad981069afb..1f090beda7e6 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -11,13 +11,16 @@

#include <linux/types.h>

+#ifdef CONFIG_ARM64_MTE
+
/*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
+ * These functions are meant to be used only from the KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
*/
-#ifdef CONFIG_ARM64_MTE

-static inline u8 mte_get_ptr_tag(void *ptr)
+static __always_inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x> */
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr)
	return tag;
}

-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static __always_inline u8 mte_get_mem_tag(void *addr)
+{
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static __always_inline u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static __always_inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	u64 curr, end;
+
+	if (!size)
+		return;
+
+	curr = (u64)__tag_set(addr, tag);
+	end = curr + size;
+
+	do {
+		/*
+		 * 'asm volatile' is required to prevent the compiler from
+		 * moving the statement outside of the loop.
+		 */
+		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+			     :
+			     : "r" (curr)
+			     : "memory");
+
+		curr += MTE_GRANULE_SIZE;
+	} while (curr != end);
+}

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
@@ -47,10 +95,12 @@ static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}
+
static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}
+
static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
	return addr;
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 237bb2f7309d..43169b978cd3 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -8,8 +8,6 @@
#include <asm/compiler.h>
#include <asm/mte-def.h>

-#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 7763ac1f2917..8b27b70e1aac 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -19,7 +19,6 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}

-u8 mte_get_mem_tag(void *addr)
-{
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "ldg %0, [%0]"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-u8 mte_get_random_tag(void)
-{
-	void *addr;
-
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "irg %0, %0"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
-{
-	void *ptr = addr;
-
-	if ((!system_supports_mte()) || (size == 0))
-		return addr;
-
-	/* Make sure that size is MTE granule aligned. */
-	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
-
-	/* Make sure that the address is MTE granule aligned. */
-	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
-
-	tag = 0xF0 | tag;
-	ptr = (void *)__tag_set(ptr, tag);
-
-	mte_assign_mem_tag_range(ptr, size);
-
-	return ptr;
-}
-
void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 9e1a12e10053..351537c12f36 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)

ret
SYM_FUNC_END(mte_restore_page_tags)
-
-/*
- * Assign allocation tags for a region of memory based on the pointer tag
- * x0 - source pointer
- * x1 - size
- *
- * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
- */
-SYM_FUNC_START(mte_assign_mem_tag_range)
-1:	stg	x0, [x0]
-	add	x0, x0, #MTE_GRANULE_SIZE
-	subs	x1, x1, #MTE_GRANULE_SIZE
-	b.gt	1b
-	ret
-SYM_FUNC_END(mte_assign_mem_tag_range)
--
2.30.0.365.g02bc693789-goog


2021-02-01 22:49:20

by Andrew Morton

Subject: Re: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

On Mon, 1 Feb 2021 20:43:34 +0100 Andrey Konovalov <[email protected]> wrote:

> This change provides a simpler implementation of mte_get_mem_tag(),
> mte_get_random_tag(), and mte_set_mem_tag_range().
>
> Simplifications include removing system_supports_mte() checks, as these
> functions are only called from the KASAN runtime, which has already
> checked system_supports_mte(). Besides that, size and address alignment
> checks are removed from mte_set_mem_tag_range(), as KASAN now performs them.
>
> This change also moves these functions into the asm/mte-kasan.h header
> and implements mte_set_mem_tag_range() via inline assembly to avoid
> unnecessary function calls.
>
> Co-developed-by: Vincenzo Frascino <[email protected]>
> Signed-off-by: Andrey Konovalov <[email protected]>

Co-developed-by requires a Signed-off-by: as well. Vincenzo, please
send us one?


2021-02-02 22:43:50

by Catalin Marinas

Subject: Re: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

On Mon, Feb 01, 2021 at 08:43:34PM +0100, Andrey Konovalov wrote:
> +/*
> + * Assign allocation tags for a region of memory based on the pointer tag.
> + * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
> + * size must be non-zero and MTE_GRANULE_SIZE aligned.
> + */

OK, so we rely on the caller to sanity-check the range. Fine by me but I
can see (un)poison_range() only doing this for the size. Do we guarantee
that the start address is aligned?

> +static __always_inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +	u64 curr, end;
> +
> +	if (!size)
> +		return;
> +
> +	curr = (u64)__tag_set(addr, tag);
> +	end = curr + size;
> +
> +	do {
> +		/*
> +		 * 'asm volatile' is required to prevent the compiler from
> +		 * moving the statement outside of the loop.
> +		 */
> +		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
> +			     :
> +			     : "r" (curr)
> +			     : "memory");
> +
> +		curr += MTE_GRANULE_SIZE;
> +	} while (curr != end);
> +}
>
> void mte_enable_kernel_sync(void);
> void mte_enable_kernel_async(void);
> @@ -47,10 +95,12 @@ static inline u8 mte_get_mem_tag(void *addr)
> {
> 	return 0xFF;
> }
> +
> static inline u8 mte_get_random_tag(void)
> {
> 	return 0xFF;
> }
> +
> static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)

This function used to return a pointer and that's what the dummy static
inline does here. However, the new mte_set_mem_tag_range() doesn't
return anything. We should have consistency between the two (the new
static void definition is fine by me).

Otherwise the patch looks fine.

Reviewed-by: Catalin Marinas <[email protected]>

2021-02-03 00:34:06

by Andrey Konovalov

Subject: Re: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

On Tue, Feb 2, 2021 at 4:42 PM Catalin Marinas <[email protected]> wrote:
>
> On Mon, Feb 01, 2021 at 08:43:34PM +0100, Andrey Konovalov wrote:
> > +/*
> > + * Assign allocation tags for a region of memory based on the pointer tag.
> > + * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
> > + * size must be non-zero and MTE_GRANULE_SIZE aligned.
> > + */
>
> OK, so we rely on the caller to sanity-check the range. Fine by me but I
> can see (un)poison_range() only doing this for the size. Do we guarantee
> that the start address is aligned?

See the previous patch in the series. kasan_poison() checks and warns
on both unaligned addr and size. kasan_unpoison() checks addr and
rounds up size.
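
For reference, roughly what those checks look like (a sketch based on
the previous patch in the series; exact names and details may differ):

	/* Sketch: KASAN-side sanity checks, approximate. */
	void kasan_poison(const void *addr, size_t size, u8 value)
	{
		addr = kasan_reset_tag(addr);

		if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
			return;
		if (WARN_ON(size & KASAN_GRANULE_MASK))
			return;

		hw_set_mem_tag_range((void *)addr, size, value);
	}

	void kasan_unpoison(const void *addr, size_t size)
	{
		u8 tag = get_tag(addr);

		addr = kasan_reset_tag(addr);

		if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
			return;

		/* size is rounded up rather than warned about */
		size = round_up(size, KASAN_GRANULE_SIZE);
		kasan_poison(addr, size, tag);
	}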

> > +static __always_inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> > +{
> > +	u64 curr, end;
> > +
> > +	if (!size)
> > +		return;
> > +
> > +	curr = (u64)__tag_set(addr, tag);
> > +	end = curr + size;
> > +
> > +	do {
> > +		/*
> > +		 * 'asm volatile' is required to prevent the compiler from
> > +		 * moving the statement outside of the loop.
> > +		 */
> > +		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
> > +			     :
> > +			     : "r" (curr)
> > +			     : "memory");
> > +
> > +		curr += MTE_GRANULE_SIZE;
> > +	} while (curr != end);
> > +}
> >
> > void mte_enable_kernel_sync(void);
> > void mte_enable_kernel_async(void);
> > @@ -47,10 +95,12 @@ static inline u8 mte_get_mem_tag(void *addr)
> > {
> > 	return 0xFF;
> > }
> > +
> > static inline u8 mte_get_random_tag(void)
> > {
> > 	return 0xFF;
> > }
> > +
> > static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>
> This function used to return a pointer and that's what the dummy static
> inline does here. However, the new mte_set_mem_tag_range() doesn't
> return anything. We should have consistency between the two (the new
> static void definition is fine by me).

Right, forgot to update the empty function definition. Will do in v2.
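
I.e. the stub will become void to match (a sketch of the v2 change):

	static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
	{
	}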

>
> Otherwise the patch looks fine.
>
> Reviewed-by: Catalin Marinas <[email protected]>

Thanks!

2021-02-04 12:39:48

by Vincenzo Frascino

Subject: Re: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

On 2/1/21 7:43 PM, Andrey Konovalov wrote:
> This change provides a simpler implementation of mte_get_mem_tag(),
> mte_get_random_tag(), and mte_set_mem_tag_range().
>
> Simplifications include removing system_supports_mte() checks, as these
> functions are only called from the KASAN runtime, which has already
> checked system_supports_mte(). Besides that, size and address alignment
> checks are removed from mte_set_mem_tag_range(), as KASAN now performs them.
>
> This change also moves these functions into the asm/mte-kasan.h header
> and implements mte_set_mem_tag_range() via inline assembly to avoid
> unnecessary function calls.
>
> Co-developed-by: Vincenzo Frascino <[email protected]>

Signed-off-by: Vincenzo Frascino <[email protected]>

> Signed-off-by: Andrey Konovalov <[email protected]>

--
Regards,
Vincenzo

2021-02-05 00:07:44

by Vincenzo Frascino

Subject: Re: [PATCH 10/12] arm64: kasan: simplify and inline MTE functions

Hi Andrew,

On 2/1/21 10:44 PM, Andrew Morton wrote:
> On Mon, 1 Feb 2021 20:43:34 +0100 Andrey Konovalov <[email protected]> wrote:
>
>> This change provides a simpler implementation of mte_get_mem_tag(),
>> mte_get_random_tag(), and mte_set_mem_tag_range().
>>
>> Simplifications include removing system_supports_mte() checks, as these
>> functions are only called from the KASAN runtime, which has already
>> checked system_supports_mte(). Besides that, size and address alignment
>> checks are removed from mte_set_mem_tag_range(), as KASAN now performs them.
>>
>> This change also moves these functions into the asm/mte-kasan.h header
>> and implements mte_set_mem_tag_range() via inline assembly to avoid
>> unnecessary function calls.
>>
>> Co-developed-by: Vincenzo Frascino <[email protected]>
>> Signed-off-by: Andrey Konovalov <[email protected]>
>
> Co-developed-by requires a Signed-off-by: as well. Vincenzo, please
> send us one?
>
>

I added my Signed-off-by to the patch.

--
Regards,
Vincenzo