2007-08-17 23:36:08

by Stephen Hemminger

Subject: [PATCH] x86-64: memset optimization

Optimize uses of memset with small constant sizes.
This will generate smaller code, and avoid the slow rep/string instructions.
Code copied from i386 with a little cleanup.

Signed-off-by: Stephen Hemminger <[email protected]>

--- a/include/asm-x86_64/string.h 2007-08-17 15:14:32.000000000 -0700
+++ b/include/asm-x86_64/string.h 2007-08-17 15:36:30.000000000 -0700
@@ -42,9 +42,51 @@ extern void *__memcpy(void *to, const vo
__ret = __builtin_memcpy((dst),(src),__len); \
__ret; })
#endif
-
#define __HAVE_ARCH_MEMSET
-void *memset(void *s, int c, size_t n);
+void *__memset(void *s, int c, size_t n);
+
+/* Optimize for cases of trivial memset's
+ * Compiler should optimize away all but the case used.
+ */
+static __always_inline void *
+__constant_c_and_count_memset(void *s, int c, size_t count)
+{
+ unsigned long pattern = 0x01010101UL * (unsigned char) c;
+
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ case 6:
+ *(unsigned long *)s = pattern;
+ *(2+(unsigned short *)s) = pattern;
+ return s;
+ case 8:
+ *(unsigned long *)s = pattern;
+ *(1+(unsigned long *)s) = pattern;
+ return s;
+ default:
+ return __memset(s, c, count);
+ }
+}
+#define memset(s, c, count) \
+ (__builtin_constant_p(c) \
+ ? __constant_c_and_count_memset((s),(c),(count)) \
+ : __memset((s),(c),(count)))
+
+

#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);
--- a/arch/x86_64/kernel/x8664_ksyms.c 2007-08-17 15:14:32.000000000 -0700
+++ b/arch/x86_64/kernel/x8664_ksyms.c 2007-08-17 15:44:58.000000000 -0700
@@ -48,10 +48,12 @@ EXPORT_SYMBOL(__read_lock_failed);
#undef memmove

extern void * memset(void *,int,__kernel_size_t);
+extern void * __memset(void *,int,__kernel_size_t);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);

EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);


2007-08-18 07:18:25

by Eric Dumazet

Subject: Re: [PATCH] x86-64: memset optimization

Stephen Hemminger wrote:
> Optimize uses of memset with small constant sizes.
> This will generate smaller code, and avoid the slow rep/string instructions.
> Code copied from i386 with a little cleanup.
>

You obviously didn't test it, did you?

How can you be sure this is going to speed things up, then?

> Signed-off-by: Stephen Hemminger <[email protected]>
>
> --- a/include/asm-x86_64/string.h 2007-08-17 15:14:32.000000000 -0700
> +++ b/include/asm-x86_64/string.h 2007-08-17 15:36:30.000000000 -0700
> @@ -42,9 +42,51 @@ extern void *__memcpy(void *to, const vo
> __ret = __builtin_memcpy((dst),(src),__len); \
> __ret; })
> #endif
> -
> #define __HAVE_ARCH_MEMSET
> -void *memset(void *s, int c, size_t n);
> +void *__memset(void *s, int c, size_t n);
> +
> +/* Optimize for cases of trivial memset's
> + * Compiler should optimize away all but the case used.
> + */
> +static __always_inline void *
> +__constant_c_and_count_memset(void *s, int c, size_t count)
> +{
> + unsigned long pattern = 0x01010101UL * (unsigned char) c;

The main difference between x86_64 and i386 is that sizeof(long) is 8 instead of 4.

Why not let gcc do its job with memset()?

On x86_64 at least, modern gcc is smart enough.
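
A minimal userspace sketch of the problem (illustrative only, assuming a
little-endian LP64 target where sizeof(unsigned long) == 8): the proposed
"case 4" arm stores a full 8-byte long, so it writes past the 4 requested
bytes, and the 32-bit constant leaves the upper half of that store zero.

#include <assert.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16] __attribute__((aligned(8)));

	memset(buf, 0xAA, sizeof(buf));	/* pre-fill so the overrun shows */

	/* what the proposed "case 4" arm does when long is 8 bytes wide */
	unsigned long pattern = 0x01010101UL * (unsigned char) 0xFF;
	*(unsigned long *)buf = pattern;	/* an 8-byte store, not 4 */

	assert(buf[3] == 0xFF);	/* the 4 requested bytes are set... */
	assert(buf[4] == 0x00);	/* ...but bytes 4..7 lost their 0xAA */
	return 0;
}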


2007-08-18 09:50:26

by Andi Kleen

Subject: Re: [PATCH] x86-64: memset optimization

On Saturday 18 August 2007 01:34:46 Stephen Hemminger wrote:
> Optimize uses of memset with small constant sizes.
> This will generate smaller code, and avoid the slow rep/string instructions.
> Code copied from i386 with a little cleanup.


Newer gcc should do all this on its own. That is why I intentionally
didn't implement it on 64-bit.

On what compiler version did you see smaller code?

-Andi




2007-08-18 14:56:24

by Stephen Hemminger

Subject: Re: [PATCH] x86-64: memset optimization

On Sat, 18 Aug 2007 11:46:24 +0200
Andi Kleen <[email protected]> wrote:

> On Saturday 18 August 2007 01:34:46 Stephen Hemminger wrote:
> > Optimize uses of memset with small constant sizes.
> > This will generate smaller code, and avoid the slow rep/string instructions.
> > Code copied from i386 with a little cleanup.
>
>
> Newer gcc should do all this on its own. That is why I intentionally
> didn't implement it on 64-bit.
>
> On what compiler version did you see smaller code?
>
> -Andi
>

The problem is that on x86-64 you are overriding memset(), so the builtin
version doesn't kick in. You allow gcc to inline memcpy but not memset.

What about adding code similar to the memcpy() stuff?

--- a/include/asm-x86_64/string.h 2007-08-18 07:37:58.000000000 -0700
+++ b/include/asm-x86_64/string.h 2007-08-18 07:44:31.000000000 -0700
@@ -43,8 +43,13 @@ extern void *__memcpy(void *to, const vo
__ret; })
#endif

-#define __HAVE_ARCH_MEMSET
-void *memset(void *s, int c, size_t n);
+#define __HAVE_ARCH_MEMSET 1
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
+extern void *memset(void *s, int c, size_t n);
+#else
+#define memset(s, c, n) __builtin_memset((s),(c),(n))
+#endif
+

#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);

2007-08-18 18:55:41

by Andi Kleen

Subject: Re: [PATCH] x86-64: memset optimization


> The problem is that on x86-64 you are overriding memset()

I don't. You must be looking at old source.

asm-x86_64/string.h 2.6.23rc3:

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);

I wanted to do the same on i386 too, but there were some minor obstacles.
The problem is that the out-of-line fallback i386 memset is currently
quite dumb and needs to be rewritten to expand the fill char on its
own, like the x86-64 version does. Probably best would be just to port
the x86-64 version; I just haven't had time for that.
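
In C, the fill-char expansion described above looks roughly like this (an
illustrative sketch only, with a made-up name; the real x86-64 version is
the assembly in arch/x86_64/lib/memset.S, and this sketch ignores
alignment of the head):

#include <stddef.h>

void *sketch_memset(void *s, int c, size_t n)
{
	/* replicate the fill byte into all 8 bytes of a long */
	unsigned long pattern = 0x0101010101010101UL * (unsigned char) c;
	unsigned long *p = s;
	unsigned char *tail;

	while (n >= sizeof(pattern)) {	/* bulk 8-byte stores */
		*p++ = pattern;
		n -= sizeof(pattern);
	}

	tail = (unsigned char *)p;	/* at most 7 trailing bytes */
	while (n--)
		*tail++ = (unsigned char) c;
	return s;
}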

[Patches welcome, but if you do, ask me for my old memset test harness.]

-Andi

2007-08-19 16:37:53

by Stephen Hemminger

Subject: Re: [PATCH] x86-64: memset optimization

On Sat, 18 Aug 2007 20:55:11 +0200
Andi Kleen <[email protected]> wrote:

>
> > The problem is that on x86-64 you are overriding memset()
>
> I don't. You must be looking at old source
>
> asm-x86_64/string.h 2.6.23rc3:
>
> #define __HAVE_ARCH_MEMSET
> void *memset(void *s, int c, size_t n);
>
> I wanted to do the same on i386 too, but there were some minor obstacles.
> The problem is that the out-of-line fallback i386 memset is currently
> quite dumb and needs to be rewritten to expand the fill char on its
> own, like the x86-64 version does. Probably best would be just to port
> the x86-64 version; I just haven't had time for that.
>
> [Patches welcome, but if you do, ask me for my old memset test harness.]
>
> -Andi

I am looking at current source, built with current (non-experimental) GCC
from Fedora Core 7. If I disassemble ether_setup, which is

void ether_setup(struct net_device *dev)
{
...

memset(dev->broadcast, 0xFF, ETH_ALEN);
}

I see a tail call (jmp) to memset, which is the code in arch/x86_64/lib/memset.S.

2007-08-19 18:24:51

by Andi Kleen

Subject: Re: [discuss] [PATCH] x86-64: memset optimization


> I am looking at current source, built with current (non-experimental) GCC
> from Fedora Core 7. If I disassemble ether_setup, which is
>
> void ether_setup(struct net_device *dev)
> {
> ...
>
> memset(dev->broadcast, 0xFF, ETH_ALEN);
> }
>
> I see a tail call (jmp) to memset, which is the code in arch/x86_64/lib/memset.S.

That is likely gcc deciding it can't use an inline memset for some reason.
It does that, for example, if it can't figure out the alignment.
Honza (cc'ed) can probably give you more details on why it happens, especially if you
give him a preprocessed, self-contained test case.

A simple example like
char x[6];

f()
{
memset(x, 1, 6);
}

gives with gcc 4.1:

.text
.p2align 4,,15
.globl f
.type f, @function
f:
.LFB2:
movl $16843009, x(%rip)
movw $257, x+4(%rip)
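# 16843009 == 0x01010101 and 257 == 0x0101: the fill byte replicated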
ret
.LFE2:

-Andi

2007-08-20 15:53:46

by Stephen Hemminger

Subject: Re: [discuss] [PATCH] x86-64: memset optimization

On Sun, 19 Aug 2007 20:24:24 +0200
Andi Kleen <[email protected]> wrote:

>
> > I am looking at current source, built with current (non-experimental) GCC
> > from Fedora Core 7. If I disassemble ether_setup, which is
> >
> > void ether_setup(struct net_device *dev)
> > {
> > ...
> >
> > memset(dev->broadcast, 0xFF, ETH_ALEN);
> > }
> >
> > I see a tail call (jmp) to memset, which is the code in arch/x86_64/lib/memset.S.
>
> That is likely gcc deciding it can't use an inline memset for some reason.
> It does that, for example, if it can't figure out the alignment.
> Honza (cc'ed) can probably give you more details on why it happens, especially if you
> give him a preprocessed, self-contained test case.
>
> A simple example like
> char x[6];
>
> f()
> {
> memset(x, 1, 6);
> }
>
> gives with gcc 4.1:
>
> .text
> .p2align 4,,15
> .globl f
> .type f, @function
> f:
> .LFB2:
> movl $16843009, x(%rip)
> movw $257, x+4(%rip)
> ret
> .LFE2:
>
> -Andi

The problem is with the optimization flags: passing -Os causes the compiler
to be stupid and not inline any memset/memcpy functions.


--
Stephen Hemminger <[email protected]>

2007-08-20 15:56:18

by Arjan van de Ven

Subject: Re: [discuss] [PATCH] x86-64: memset optimization


On Mon, 2007-08-20 at 08:52 -0700, Stephen Hemminger wrote:

> The problem is with the optimization flags: passing -Os causes the compiler
> to be stupid and not inline any memset/memcpy functions.

you get what you ask for.. if you don't want that then don't ask for
it ;)



2007-08-20 17:04:05

by Roland Dreier

Subject: Re: [discuss] [PATCH] x86-64: memset optimization

> > The problem is with the optimization flags: passing -Os causes the compiler
> > to be stupid and not inline any memset/memcpy functions.
>
> you get what you ask for.. if you don't want that then don't ask for
> it ;)

Well, the compiler is really being dumb about -Os and in fact it's
giving bigger code, so I'm not really getting what I ask for.

With my gcc at least (x86_64, gcc (GCC) 4.1.3 20070812 (prerelease)
(Ubuntu 4.1.2-15ubuntu2)) and Andi's example:

#include <string.h>

f(char x[6]) {
memset(x, 1, 6);
}

compiling with -O2 gives

0000000000000000 <f>:
0: c7 07 01 01 01 01 movl $0x1010101,(%rdi)
6: 66 c7 47 04 01 01 movw $0x101,0x4(%rdi)
c: c3 retq

and compiling with -Os gives

0000000000000000 <f>:
0: 48 83 ec 08 sub $0x8,%rsp
4: ba 06 00 00 00 mov $0x6,%edx
9: be 01 00 00 00 mov $0x1,%esi
e: e8 00 00 00 00 callq 13 <f+0x13>
13: 5a pop %rdx
14: c3 retq

so the code gets bigger and worse in every way.

- R.

2007-08-20 17:23:19

by Andi Kleen

Subject: Re: [discuss] [PATCH] x86-64: memset optimization

> so the code gets bigger and worse in every way.

I guess it would make sense to file this as a bug in the gcc
bugzilla.

Another useful enhancement might be to have a -finline-string-functions
option or similar that could be set together with -Os. In general I've been
wondering for some time whether the kernel really needs a new optimization
setting somewhere between -Os and -O2. -Os occasionally has a few other
bad side effects too.

-Andi

2007-08-20 18:56:48

by Jan Hubicka

Subject: Re: [discuss] [PATCH] x86-64: memset optimization

> > > The problem is with the optimization flags: passing -Os causes the compiler
> > > to be stupid and not inline any memset/memcpy functions.
> >
> > you get what you ask for.. if you don't want that then don't ask for
> > it ;)
>
> Well, the compiler is really being dumb about -Os and in fact it's
> giving bigger code, so I'm not really getting what I ask for.
>
> With my gcc at least (x86_64, gcc (GCC) 4.1.3 20070812 (prerelease)
> (Ubuntu 4.1.2-15ubuntu2)) and Andi's example:
>
> #include <string.h>
>
> f(char x[6]) {
> memset(x, 1, 6);
> }
>
> compiling with -O2 gives
>
> 0000000000000000 <f>:
> 0: c7 07 01 01 01 01 movl $0x1010101,(%rdi)
> 6: 66 c7 47 04 01 01 movw $0x101,0x4(%rdi)
> c: c3 retq

GCC mainline (i.e. the future GCC 4.3.0) now gives:
0000000000000000 <f>:
0: b0 01 mov $0x1,%al
2: b9 06 00 00 00 mov $0x6,%ecx
7: f3 aa rep stos %al,%es:(%rdi)
9: c3 retq
That is smallest, but definitely not fastest.
GCC before 4.3.0 won't be able to inline memset with a non-0 fill operand...
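
For contrast, the zero-fill form of the same call should already be
expanded inline by pre-4.3 gcc; an untested sketch of what that means:

#include <string.h>

char x[6];

void g(void)
{
	memset(x, 0, 6);	/* expected: movl $0,x(%rip); movw $0,x+4(%rip) */
}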

Honza

2007-08-21 10:16:28

by Denys Vlasenko

Subject: Re: [discuss] [PATCH] x86-64: memset optimization

On Monday 20 August 2007 19:56, Jan Hubicka wrote:
> > > > The problem is with the optimization flags: passing -Os causes the
> > > > compiler to be stupid and not inline any memset/memcpy functions.
> > >
> > > you get what you ask for.. if you don't want that then don't ask for
> > > it ;)
> >
> > Well, the compiler is really being dumb about -Os and in fact it's
> > giving bigger code, so I'm not really getting what I ask for.
> >
> > With my gcc at least (x86_64, gcc (GCC) 4.1.3 20070812 (prerelease)
> > (Ubuntu 4.1.2-15ubuntu2)) and Andi's example:
> >
> > #include <string.h>
> >
> > f(char x[6]) {
> > memset(x, 1, 6);
> > }
> >
> > compiling with -O2 gives
> >
> > 0000000000000000 <f>:
> > 0: c7 07 01 01 01 01 movl $0x1010101,(%rdi)
> > 6: 66 c7 47 04 01 01 movw $0x101,0x4(%rdi)
> > c: c3 retq
>
> GCC mainline (i.e. the future GCC 4.3.0) now gives:
> 0000000000000000 <f>:
> 0: b0 01 mov $0x1,%al
> 2: b9 06 00 00 00 mov $0x6,%ecx
> 7: f3 aa rep stos %al,%es:(%rdi)
> 9: c3 retq
> That is smallest, but definitely not fastest.
> GCC before 4.3.0 won't be able to inline memset with a non-0 fill operand...

No, it's not the smallest. This one is smaller by 1 byte, maybe faster
(the rep prefix is microcoded -> slower), and it frees %ecx for other uses:

mov $0x01010101,%eax # 5 bytes
stosl # 1 byte
stosw # 2 bytes
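# (stosl/stosw store through %rdi and advance it; %ecx stays free)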
retq
--
vda