2021-06-17 16:17:02

by Matteo Croce

Subject: [PATCH v3 3/3] riscv: optimized memset

From: Matteo Croce <[email protected]>

The generic memset is defined as a byte-at-a-time write. This is always
safe, but it's slower than a 4 byte or even 8 byte write.

Write a generic memset which fills the data one byte at a time until the
destination is aligned, then fills using the largest size allowed,
and finally fills the remaining data one byte at a time.

Signed-off-by: Matteo Croce <[email protected]>
---
 arch/riscv/include/asm/string.h |  10 +--
 arch/riscv/kernel/Makefile      |   1 -
 arch/riscv/kernel/riscv_ksyms.c |  13 ----
 arch/riscv/lib/Makefile         |   1 -
 arch/riscv/lib/memset.S         | 113 --------------------------------
 arch/riscv/lib/string.c         |  39 +++++++++++
 6 files changed, 42 insertions(+), 135 deletions(-)
delete mode 100644 arch/riscv/kernel/riscv_ksyms.c
delete mode 100644 arch/riscv/lib/memset.S

diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 25d9b9078569..90500635035a 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -6,14 +6,10 @@
#ifndef _ASM_RISCV_STRING_H
#define _ASM_RISCV_STRING_H

-#include <linux/types.h>
-#include <linux/linkage.h>
-
-#define __HAVE_ARCH_MEMSET
-extern asmlinkage void *memset(void *, int, size_t);
-extern asmlinkage void *__memset(void *, int, size_t);
-
#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *s, int c, size_t count);
+extern void *__memset(void *s, int c, size_t count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *dest, const void *src, size_t count);
extern void *__memcpy(void *dest, const void *src, size_t count);
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d3081e4d9600..e635ce1e5645 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,7 +31,6 @@ obj-y += syscall_table.o
obj-y += sys_riscv.o
obj-y += time.o
obj-y += traps.o
-obj-y += riscv_ksyms.o
obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
deleted file mode 100644
index 361565c4db7e..000000000000
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Zihao Yu
- */
-
-#include <linux/export.h>
-#include <linux/uaccess.h>
-
-/*
- * Assembly functions that may be used (directly or indirectly) by modules
- */
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__memset);
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 484f5ff7b508..e33263cc622a 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
lib-y += delay.o
-lib-y += memset.o
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE) += string.o
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
deleted file mode 100644
index 34c5360c6705..000000000000
--- a/arch/riscv/lib/memset.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 Regents of the University of California
- */
-
-
-#include <linux/linkage.h>
-#include <asm/asm.h>
-
-/* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
- move t0, a0 /* Preserve return value */
-
- /* Defer to byte-oriented fill for small sizes */
- sltiu a3, a2, 16
- bnez a3, 4f
-
- /*
- * Round to nearest XLEN-aligned address
- * greater than or equal to start address
- */
- addi a3, t0, SZREG-1
- andi a3, a3, ~(SZREG-1)
- beq a3, t0, 2f /* Skip if already aligned */
- /* Handle initial misalignment */
- sub a4, a3, t0
-1:
- sb a1, 0(t0)
- addi t0, t0, 1
- bltu t0, a3, 1b
- sub a2, a2, a4 /* Update count */
-
-2: /* Duff's device with 32 XLEN stores per iteration */
- /* Broadcast value into all bytes */
- andi a1, a1, 0xff
- slli a3, a1, 8
- or a1, a3, a1
- slli a3, a1, 16
- or a1, a3, a1
-#ifdef CONFIG_64BIT
- slli a3, a1, 32
- or a1, a3, a1
-#endif
-
- /* Calculate end address */
- andi a4, a2, ~(SZREG-1)
- add a3, t0, a4
-
- andi a4, a4, 31*SZREG /* Calculate remainder */
- beqz a4, 3f /* Shortcut if no remainder */
- neg a4, a4
- addi a4, a4, 32*SZREG /* Calculate initial offset */
-
- /* Adjust start address with offset */
- sub t0, t0, a4
-
- /* Jump into loop body */
- /* Assumes 32-bit instruction lengths */
- la a5, 3f
-#ifdef CONFIG_64BIT
- srli a4, a4, 1
-#endif
- add a5, a5, a4
- jr a5
-3:
- REG_S a1, 0(t0)
- REG_S a1, SZREG(t0)
- REG_S a1, 2*SZREG(t0)
- REG_S a1, 3*SZREG(t0)
- REG_S a1, 4*SZREG(t0)
- REG_S a1, 5*SZREG(t0)
- REG_S a1, 6*SZREG(t0)
- REG_S a1, 7*SZREG(t0)
- REG_S a1, 8*SZREG(t0)
- REG_S a1, 9*SZREG(t0)
- REG_S a1, 10*SZREG(t0)
- REG_S a1, 11*SZREG(t0)
- REG_S a1, 12*SZREG(t0)
- REG_S a1, 13*SZREG(t0)
- REG_S a1, 14*SZREG(t0)
- REG_S a1, 15*SZREG(t0)
- REG_S a1, 16*SZREG(t0)
- REG_S a1, 17*SZREG(t0)
- REG_S a1, 18*SZREG(t0)
- REG_S a1, 19*SZREG(t0)
- REG_S a1, 20*SZREG(t0)
- REG_S a1, 21*SZREG(t0)
- REG_S a1, 22*SZREG(t0)
- REG_S a1, 23*SZREG(t0)
- REG_S a1, 24*SZREG(t0)
- REG_S a1, 25*SZREG(t0)
- REG_S a1, 26*SZREG(t0)
- REG_S a1, 27*SZREG(t0)
- REG_S a1, 28*SZREG(t0)
- REG_S a1, 29*SZREG(t0)
- REG_S a1, 30*SZREG(t0)
- REG_S a1, 31*SZREG(t0)
- addi t0, t0, 32*SZREG
- bltu t0, a3, 3b
- andi a2, a2, SZREG-1 /* Update count */
-
-4:
- /* Handle trailing misalignment */
- beqz a2, 6f
- add a3, t0, a2
-5:
- sb a1, 0(t0)
- addi t0, t0, 1
- bltu t0, a3, 5b
-6:
- ret
-END(__memset)
diff --git a/arch/riscv/lib/string.c b/arch/riscv/lib/string.c
index 9c7009d43c39..1fb4de351516 100644
--- a/arch/riscv/lib/string.c
+++ b/arch/riscv/lib/string.c
@@ -112,3 +112,42 @@ EXPORT_SYMBOL(__memmove);

void *memmove(void *dest, const void *src, size_t count) __weak __alias(__memmove);
EXPORT_SYMBOL(memmove);
+
+void *__memset(void *s, int c, size_t count)
+{
+	union types dest = { .u8 = s };
+
+	if (count >= MIN_THRESHOLD) {
+		const int bytes_long = BITS_PER_LONG / 8;
+		unsigned long cu = (unsigned long)c;
+
+		/* Compose an ulong with 'c' repeated 4/8 times */
+		cu |= cu << 8;
+		cu |= cu << 16;
+#if BITS_PER_LONG == 64
+		cu |= cu << 32;
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		/* Fill the buffer one byte at time until the destination
+		 * is aligned on a 32/64 bit boundary.
+		 */
+		for (; count && dest.uptr % bytes_long; count--)
+			*dest.u8++ = c;
+#endif
+
+		/* Copy using the largest size allowed */
+		for (; count >= bytes_long; count -= bytes_long)
+			*dest.ulong++ = cu;
+	}
+
+	/* copy the remainder */
+	while (count--)
+		*dest.u8++ = c;
+
+	return s;
+}
+EXPORT_SYMBOL(__memset);
+
+void *memset(void *s, int c, size_t count) __weak __alias(__memset);
+EXPORT_SYMBOL(memset);
--
2.31.1


2021-06-21 14:34:57

by Christoph Hellwig

Subject: Re: [PATCH v3 3/3] riscv: optimized memset

Looks nice, except IS_ENABLED would be useful here again, as would
a placement in lib/.
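
For illustration, a minimal sketch of the IS_ENABLED() form being
suggested here, reusing the names from the patch (not code from the
thread):

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
		/* Byte-fill until the destination is word aligned */
		for (; count && dest.uptr % bytes_long; count--)
			*dest.u8++ = c;
	}

The body is always compiled and the dead code optimised away, so the
#ifndef goes away.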

2021-06-22 01:10:16

by Nick Kossifidis

Subject: Re: [PATCH v3 3/3] riscv: optimized memset

On 2021-06-17 18:27, Matteo Croce wrote:
> +
> +void *__memset(void *s, int c, size_t count)
> +{
> + union types dest = { .u8 = s };
> +
> + if (count >= MIN_THRESHOLD) {
> + const int bytes_long = BITS_PER_LONG / 8;

You could make 'const int bytes_long = BITS_PER_LONG / 8;' and 'const
int mask = bytes_long - 1;' from your memcpy patch visible to memset as
well (static const...) and use them here (mask would make more sense to
be named as word_mask).
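
A sketch of that sharing, at the top of arch/riscv/lib/string.c, with
word_mask spelled out so both initializers stay constant expressions
(names illustrative):

	/* Shared by __memcpy() and __memset() */
	static const int bytes_long = BITS_PER_LONG / 8;
	static const int word_mask = BITS_PER_LONG / 8 - 1;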

> + unsigned long cu = (unsigned long)c;
> +
> + /* Compose an ulong with 'c' repeated 4/8 times */
> + cu |= cu << 8;
> + cu |= cu << 16;
> +#if BITS_PER_LONG == 64
> + cu |= cu << 32;
> +#endif
> +

You don't have to create cu here, you'll fill dest buffer with 'c'
anyway so after filling up enough 'c's to be able to grab an aligned
word full of them from dest, you can just grab that word and keep
filling up dest with it.
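
Roughly, that idea as a sketch, reusing the patch's union and assuming
the existing count >= MIN_THRESHOLD guard (it trades the shifts for a
store-to-load dependency):

	/* Byte-fill until dest is word aligned and at least one whole
	 * word of 'c' bytes already lies behind it */
	while (dest.uptr % bytes_long || dest.uptr - (uintptr_t)s < bytes_long) {
		*dest.u8++ = c;
		count--;
	}
	/* reload that word and use it for the bulk fill */
	cu = *(dest.ulong - 1);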

> +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> + /* Fill the buffer one byte at time until the destination
> + * is aligned on a 32/64 bit boundary.
> + */
> + for (; count && dest.uptr % bytes_long; count--)

You could reuse & mask here instead of % bytes_long.
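
(With the shared word_mask suggested above, that loop header would read
roughly: for (; count && (dest.uptr & word_mask); count--).)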

> + *dest.u8++ = c;
> +#endif

I noticed you also used CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on your
memcpy patch, is it worth it here ? To begin with riscv doesn't set it
and even if it did we are talking about a loop that will run just a few
times to reach the alignment boundary (worst case scenario it'll run 7
times), I don't think we gain much here, even for archs that have
efficient unaligned access.

2021-06-22 08:40:15

by David Laight

Subject: RE: [PATCH v3 3/3] riscv: optimized memset

From: Nick Kossifidis
> Sent: 22 June 2021 02:08
>
> On 2021-06-17 18:27, Matteo Croce wrote:
> > +
> > +void *__memset(void *s, int c, size_t count)
> > +{
> > + union types dest = { .u8 = s };
> > +
> > + if (count >= MIN_THRESHOLD) {
> > + const int bytes_long = BITS_PER_LONG / 8;
>
> You could make 'const int bytes_long = BITS_PER_LONG / 8;'

What is wrong with sizeof (long) ?
...
> > + unsigned long cu = (unsigned long)c;
> > +
> > + /* Compose an ulong with 'c' repeated 4/8 times */
> > + cu |= cu << 8;
> > + cu |= cu << 16;
> > +#if BITS_PER_LONG == 64
> > + cu |= cu << 32;
> > +#endif
> > +
>
> You don't have to create cu here, you'll fill dest buffer with 'c'
> anyway so after filling up enough 'c's to be able to grab an aligned
> word full of them from dest, you can just grab that word and keep
> filling up dest with it.

That will be a lot slower - especially if run on something like x86.
A write-read of the same size is optimised by the store-load forwarder.
But the byte write, word read will have to go via the cache.

You can just write:
cu = (unsigned long)c * 0x0101010101010101ull;
and let the compiler sort out the best way to generate the constant.

>
> > +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> > + /* Fill the buffer one byte at time until the destination
> > + * is aligned on a 32/64 bit boundary.
> > + */
> > + for (; count && dest.uptr % bytes_long; count--)
>
> You could reuse & mask here instead of % bytes_long.
>
> > + *dest.u8++ = c;
> > +#endif
>
> I noticed you also used CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on your
> memcpy patch, is it worth it here ? To begin with riscv doesn't set it
> and even if it did we are talking about a loop that will run just a few
> times to reach the alignment boundary (worst case scenario it'll run 7
> times), I don't think we gain much here, even for archs that have
> efficient unaligned access.

With CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS it probably isn't worth
even checking the alignment.
While aligning the copy will be quicker for an unaligned buffer they
almost certainly don't happen often enough to worry about.
In any case you'd want to do a misaligned word write to the start
of the buffer - not separate byte writes.
Provided the buffer is long enough you can also do a misaligned write
to the end of the buffer before filling from the start.
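
For illustration only, a sketch of that head/tail trick, assuming
count >= sizeof(long), cheap unaligned stores, and the kernel's
-fno-strict-aliasing build:

	unsigned long *p = (unsigned long *)ALIGN((unsigned long)s, sizeof(long));
	unsigned long *end = (unsigned long *)ALIGN_DOWN((unsigned long)s + count,
							  sizeof(long));

	*(unsigned long *)s = cu;		/* possibly misaligned head */
	*(unsigned long *)((char *)s + count - sizeof(long)) = cu;	/* tail */
	while (p < end)				/* aligned middle */
		*p++ = cu;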

I suspect you may need either barrier() or use a ptr to packed
to avoid the perverted 'undefined behaviour' fubar.

David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)

2021-06-23 00:13:37

by Matteo Croce

Subject: Re: [PATCH v3 3/3] riscv: optimized memset

On Tue, Jun 22, 2021 at 3:07 AM Nick Kossifidis <[email protected]> wrote:
>
> On 2021-06-17 18:27, Matteo Croce wrote:
> > +
> > +void *__memset(void *s, int c, size_t count)
> > +{
> > + union types dest = { .u8 = s };
> > +
> > + if (count >= MIN_THRESHOLD) {
> > + const int bytes_long = BITS_PER_LONG / 8;
>
> You could make 'const int bytes_long = BITS_PER_LONG / 8;' and 'const
> int mask = bytes_long - 1;' from your memcpy patch visible to memset as
> well (static const...) and use them here (mask would make more sense to
> be named as word_mask).
>

I'll do that.

> > + unsigned long cu = (unsigned long)c;
> > +
> > + /* Compose an ulong with 'c' repeated 4/8 times */
> > + cu |= cu << 8;
> > + cu |= cu << 16;
> > +#if BITS_PER_LONG == 64
> > + cu |= cu << 32;
> > +#endif
> > +
>
> You don't have to create cu here, you'll fill dest buffer with 'c'
> anyway so after filling up enough 'c's to be able to grab an aligned
> word full of them from dest, you can just grab that word and keep
> filling up dest with it.
>

I tried that, but that way I have to fill up to 8 extra bytes before the
word-sized fill can start.
Also, the machine code needed to generate 'cu' is just 6 instructions on riscv:

	slli a5,a0,8
	or a5,a5,a0
	slli a0,a5,16
	or a0,a0,a5
	slli a5,a0,32
	or a0,a5,a0

so probably it's not worth it.

> > +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> > + /* Fill the buffer one byte at time until the destination
> > + * is aligned on a 32/64 bit boundary.
> > + */
> > + for (; count && dest.uptr % bytes_long; count--)
>
> You could reuse & mask here instead of % bytes_long.
>

Sure, even though the machine code will be the same.

> > + *dest.u8++ = c;
> > +#endif
>
> I noticed you also used CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on your
> memcpy patch, is it worth it here ? To begin with riscv doesn't set it
> and even if it did we are talking about a loop that will run just a few
> times to reach the alignment boundary (worst case scenario it'll run 7
> times), I don't think we gain much here, even for archs that have
> efficient unaligned access.

It doesn't _now_, but maybe in the future we will have a CPU which
handles unaligned accesses efficiently!

--
per aspera ad upstream

2021-06-23 01:15:57

by Matteo Croce

Subject: Re: [PATCH v3 3/3] riscv: optimized memset

On Tue, Jun 22, 2021 at 10:38 AM David Laight <[email protected]> wrote:
>
> From: Nick Kossifidis
> > Sent: 22 June 2021 02:08
> >
> > On 2021-06-17 18:27, Matteo Croce wrote:
> > > +
> > > +void *__memset(void *s, int c, size_t count)
> > > +{
> > > + union types dest = { .u8 = s };
> > > +
> > > + if (count >= MIN_THRESHOLD) {
> > > + const int bytes_long = BITS_PER_LONG / 8;
> >
> > You could make 'const int bytes_long = BITS_PER_LONG / 8;'
>
> What is wrong with sizeof (long) ?
> ...

Nothing, I guess that BITS_PER_LONG is just (sizeof(long) * 8) anyway

> > > + unsigned long cu = (unsigned long)c;
> > > +
> > > + /* Compose an ulong with 'c' repeated 4/8 times */
> > > + cu |= cu << 8;
> > > + cu |= cu << 16;
> > > +#if BITS_PER_LONG == 64
> > > + cu |= cu << 32;
> > > +#endif
> > > +
> >
> > You don't have to create cu here, you'll fill dest buffer with 'c'
> > anyway so after filling up enough 'c's to be able to grab an aligned
> > word full of them from dest, you can just grab that word and keep
> > filling up dest with it.
>
> That will be a lot slower - especially if run on something like x86.
> A write-read of the same size is optimised by the store-load forwarder.
> But the byte write, word read will have to go via the cache.
>
> You can just write:
> cu = (unsigned long)c * 0x0101010101010101ull;
> and let the compiler sort out the best way to generate the constant.
>

Interesting. I see that most compilers emit an integer multiplication;
is it faster than three shifts and three ORs?

clang on riscv generates even more instructions to create the immediate:

unsigned long repeat_shift(int c)
{
	unsigned long cu = (unsigned long)c;
	cu |= cu << 8;
	cu |= cu << 16;
	cu |= cu << 32;

	return cu;
}

unsigned long repeat_mul(int c)
{
	return (unsigned long)c * 0x0101010101010101ull;
}

repeat_shift:
	slli a1, a0, 8
	or a0, a0, a1
	slli a1, a0, 16
	or a0, a0, a1
	slli a1, a0, 32
	or a0, a0, a1
	ret

repeat_mul:
	lui a1, 4112
	addiw a1, a1, 257
	slli a1, a1, 16
	addi a1, a1, 257
	slli a1, a1, 16
	addi a1, a1, 257
	mul a0, a0, a1
	ret

> >
> > > +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> > > + /* Fill the buffer one byte at time until the destination
> > > + * is aligned on a 32/64 bit boundary.
> > > + */
> > > + for (; count && dest.uptr % bytes_long; count--)
> >
> > You could reuse & mask here instead of % bytes_long.
> >
> > > + *dest.u8++ = c;
> > > +#endif
> >
> > I noticed you also used CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on your
> > memcpy patch, is it worth it here ? To begin with riscv doesn't set it
> > and even if it did we are talking about a loop that will run just a few
> > times to reach the alignment boundary (worst case scenario it'll run 7
> > times), I don't think we gain much here, even for archs that have
> > efficient unaligned access.
>
> With CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS it probably isn't worth
> even checking the alignment.
> While aligning the copy will be quicker for an unaligned buffer they
> almost certainly don't happen often enough to worry about.
> In any case you'd want to do a misaligned word write to the start
> of the buffer - not separate byte writes.
> Provided the buffer is long enough you can also do a misaligned write
> to the end of the buffer before filling from the start.
>

I don't understand this one: a misaligned write here is ~30x slower
than an aligned one, because it gets trapped and emulated in SBI.
How can that be a win?

> I suspect you may need either barrier() or use a ptr to packed
> to avoid the perverted 'undefined behaviour' fubar.
>

Which UB are you referring to?

Regards,
--
per aspera ad upstream

2021-06-23 09:07:48

by David Laight

Subject: RE: [PATCH v3 3/3] riscv: optimized memset

From: Matteo Croce
> Sent: 23 June 2021 02:15
>
> On Tue, Jun 22, 2021 at 10:38 AM David Laight <[email protected]> wrote:
> >
> > From: Nick Kossifidis
...
> > You can just write:
> > cu = (unsigned long)c * 0x0101010101010101ull;
> > and let the compiler sort out the best way to generate the constant.
> >
>
> Interesting. I see that most compilers emit an integer multiplication;
> is it faster than three shifts and three ORs?
>
> clang on riscv generates even more instructions to create the immediate:
>
> unsigned long repeat_shift(int c)
> {
> 	unsigned long cu = (unsigned long)c;
> 	cu |= cu << 8;
> 	cu |= cu << 16;
> 	cu |= cu << 32;
>
> 	return cu;
> }
>
> unsigned long repeat_mul(int c)
> {
> 	return (unsigned long)c * 0x0101010101010101ull;
> }
>
> repeat_shift:
> 	slli a1, a0, 8
> 	or a0, a0, a1
> 	slli a1, a0, 16
> 	or a0, a0, a1
> 	slli a1, a0, 32
> 	or a0, a0, a1
> 	ret
>
> repeat_mul:
> 	lui a1, 4112
> 	addiw a1, a1, 257
> 	slli a1, a1, 16
> 	addi a1, a1, 257
> 	slli a1, a1, 16
> 	addi a1, a1, 257
> 	mul a0, a0, a1
> 	ret

Hmmm... I expected the compiler to convert it to the first form.
It is also pretty crap at generating that constant.
Stupid compilers.

In any case, for the usual case of 'c' being a constant zero
you really don't want the latency of those instructions at all.

It is almost worth just pushing that expansion into the caller.

e.g. by having:
#define memset(p, v, l) memset_w(p, (v) * 0x0101010101010101, l)
(or some other byte replicator).

Really annoyingly, you want to write the code that generates
the 64-bit constant and then have the compiler optimise away
the part that generates the high 32 bits on 32-bit systems.
But one of the compilers is going to 'bleat' about truncating
a constant value.
Stupid compilers (again).
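
One sketch that avoids both the 64-bit literal and the warning is to
build the multiplier by division; it folds to the right constant on
either width:

	/* 0x01010101UL on 32-bit, 0x0101010101010101UL on 64-bit */
	unsigned long cu = (unsigned long)(unsigned char)c * ((unsigned long)-1 / 0xff);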

David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)