From: Ling Ma <[email protected]>
In this patch we reduce branch mispredictions by avoiding branch
instructions and by forcing the destination to be aligned, using general
64-bit instructions. We also widen the alternatives entry length fields
from .byte to .word so that replacement code longer than 255 bytes can be
described. (A C sketch of the idea follows the patch.)
The comparison results below show we improve performance by up to 1.8x
(we modified the test suite from Ondra; it will be sent after this patch):
Bytes: ORG_TIME: NEW_TIME: ORG vs NEW:
7 0.51 0.48 1.06
16 0.55 0.38 1.44
18 0.61 0.44 1.38
21 0.62 0.47 1.31
25 0.64 0.45 1.42
30 0.65 0.45 1.44
36 0.66 0.44 1.50
38 0.67 0.46 1.45
62 0.70 0.44 1.59
75 0.71 0.44 1.61
85 0.73 0.46 1.58
120 0.78 0.44 1.77
193 0.81 0.46 1.76
245 0.84 0.52 1.61
256 0.83 0.45 1.84
356 0.86 0.55 1.56
601 0.98 0.65 1.50
958 1.14 0.81 1.40
1024 1.19 0.86 1.38
2048 1.69 1.34 1.26
Signed-off-by: Ling Ma <[email protected]>
---
arch/x86/include/asm/alternative-asm.h | 4 +-
arch/x86/lib/memset_64.S | 172 +++++++++++++++++++++------------
2 files changed, 110 insertions(+), 66 deletions(-)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 372231c..aaac545 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -22,8 +22,8 @@
.long \orig - .
.long \alt - .
.word \feature
- .byte \orig_len
- .byte \alt_len
+ .word \orig_len
+ .word \alt_len
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2dcb380..3eca27c 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -59,77 +59,121 @@
ENTRY(memset)
ENTRY(__memset)
CFI_STARTPROC
- movq %rdi,%r10
-
- /* expand byte value */
movzbl %sil,%ecx
- movabs $0x0101010101010101,%rax
- imulq %rcx,%rax
-
- /* align dst */
- movl %edi,%r9d
- andl $7,%r9d
- jnz .Lbad_alignment
- CFI_REMEMBER_STATE
-.Lafter_bad_alignment:
-
- movq %rdx,%rcx
- shrq $6,%rcx
- jz .Lhandle_tail
-
+ mov $0x0101010101010101,%rsi
+ imulq %rsi,%rcx
+ movq %rdi,%rax
+ lea (%rdi, %rdx), %r8
+ cmp $128, %rdx
+ ja .Lmore128bytes
+ cmp $64, %edx
+ jb .Lless_64bytes
+ /*
+	 * Fill from 64 up to 128 bytes.
+ */
+ mov %rcx, 0x00(%rdi)
+ mov %rcx, 0x08(%rdi)
+ mov %rcx, 0x10(%rdi)
+ mov %rcx, 0x18(%rdi)
+ mov %rcx, 0x20(%rdi)
+ mov %rcx, 0x28(%rdi)
+ mov %rcx, 0x30(%rdi)
+ mov %rcx, 0x38(%rdi)
+ mov %rcx, -0x40(%r8)
+ mov %rcx, -0x38(%r8)
+ mov %rcx, -0x30(%r8)
+ mov %rcx, -0x28(%r8)
+ mov %rcx, -0x20(%r8)
+ mov %rcx, -0x18(%r8)
+ mov %rcx, -0x10(%r8)
+ mov %rcx, -0x08(%r8)
+ ret
.p2align 4
-.Lloop_64:
- decq %rcx
- movq %rax,(%rdi)
- movq %rax,8(%rdi)
- movq %rax,16(%rdi)
- movq %rax,24(%rdi)
- movq %rax,32(%rdi)
- movq %rax,40(%rdi)
- movq %rax,48(%rdi)
- movq %rax,56(%rdi)
- leaq 64(%rdi),%rdi
- jnz .Lloop_64
-
- /* Handle tail in loops. The loops should be faster than hard
- to predict jump tables. */
+.Lless_64bytes:
+ cmp $32, %edx
+ jb .Lless_32bytes
+ /*
+	 * Fill from 32 up to 63 bytes.
+ */
+ mov %rcx, 0x00(%rdi)
+ mov %rcx, 0x08(%rdi)
+ mov %rcx, 0x10(%rdi)
+ mov %rcx, 0x18(%rdi)
+ mov %rcx, -0x20(%r8)
+ mov %rcx, -0x18(%r8)
+ mov %rcx, -0x10(%r8)
+ mov %rcx, -0x08(%r8)
+ ret
.p2align 4
-.Lhandle_tail:
- movl %edx,%ecx
- andl $63&(~7),%ecx
- jz .Lhandle_7
- shrl $3,%ecx
+.Lless_32bytes:
+ cmp $16, %edx
+ jb .Lless_16bytes
+ mov %rcx, 0x00(%rdi)
+ mov %rcx, 0x08(%rdi)
+ mov %rcx, -0x10(%r8)
+ mov %rcx, -0x08(%r8)
+ ret
.p2align 4
-.Lloop_8:
- decl %ecx
- movq %rax,(%rdi)
- leaq 8(%rdi),%rdi
- jnz .Lloop_8
-
-.Lhandle_7:
- andl $7,%edx
- jz .Lende
+.Lless_16bytes:
+ cmp $8, %edx
+ jb .Lless_8bytes
+ mov %rcx, (%rdi)
+ mov %rcx, -0x08(%r8)
+ ret
.p2align 4
-.Lloop_1:
- decl %edx
- movb %al,(%rdi)
- leaq 1(%rdi),%rdi
- jnz .Lloop_1
-
-.Lende:
- movq %r10,%rax
+.Lless_8bytes:
+ cmp $4, %edx
+ jb .Lless_4bytes
+ mov %ecx, (%rdi)
+ mov %ecx, -0x04(%r8)
+ .p2align 4
+.Lless_4bytes:
+ cmp $2, %edx
+ jb .Lless_2bytes
+ mov %cx, (%rdi)
+ mov %cx, -0x02(%r8)
+ ret
+ .p2align 4
+.Lless_2bytes:
+ cmp $1, %edx
+ jb .Lless_1bytes
+ mov %cl, (%rdi)
+.Lless_1bytes:
ret
- CFI_RESTORE_STATE
-.Lbad_alignment:
- cmpq $7,%rdx
- jbe .Lhandle_7
- movq %rax,(%rdi) /* unaligned store */
- movq $8,%r8
- subq %r9,%r8
- addq %r8,%rdi
- subq %r8,%rdx
- jmp .Lafter_bad_alignment
+ .p2align 4
+.Lmore128bytes:
+ mov %rcx, (%rdi)
+ mov %rdi, %r9
+ and $-0x08, %rdi
+ add $0x08, %rdi
+ sub %rdi, %r9
+ add %r9, %rdx
+ sub $0x40, %rdx
+.Lgobble_64_loop:
+ mov %rcx, 0x00(%rdi)
+ mov %rcx, 0x08(%rdi)
+ mov %rcx, 0x10(%rdi)
+ mov %rcx, 0x18(%rdi)
+ mov %rcx, 0x20(%rdi)
+ mov %rcx, 0x28(%rdi)
+ mov %rcx, 0x30(%rdi)
+ mov %rcx, 0x38(%rdi)
+ lea 0x40(%rdi), %rdi
+ sub $0x40, %rdx
+ jae .Lgobble_64_loop
+ /*
+	 * Fill the remaining 0 to 63 bytes.
+ */
+ mov %rcx, -0x40(%r8)
+ mov %rcx, -0x38(%r8)
+ mov %rcx, -0x30(%r8)
+ mov %rcx, -0x28(%r8)
+ mov %rcx, -0x20(%r8)
+ mov %rcx, -0x18(%r8)
+ mov %rcx, -0x10(%r8)
+ mov %rcx, -0x08(%r8)
+ ret
.Lfinal:
CFI_ENDPROC
ENDPROC(memset)
--
1.8.1.4
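For reviewers who prefer C, the technique reads roughly as follows. This
is an illustration only (the assembly above is the actual implementation);
memset_sketch and store8 are hypothetical names, and it assumes an
x86-64-like target where unaligned 64-bit stores are cheap:

#include <stdint.h>
#include <string.h>

/* One 8-byte store; a fixed-size memcpy compiles to a single mov. */
static void store8(unsigned char *p, uint64_t v)
{
	memcpy(p, &v, 8);
}

/*
 * Pick a size class with a few compares, then cover the whole range
 * with overlapping stores from the head (d) and the tail (end), so no
 * per-word or per-byte tail loop is needed.
 */
void *memset_sketch(void *dst, int c, size_t n)
{
	unsigned char *d = dst, *end = d + n;
	/* Replicate the byte into all eight lanes with one multiply. */
	uint64_t v = (unsigned char)c * 0x0101010101010101ULL;

	if (n > 128) {		/* unaligned head store, aligned 64-byte loop */
		unsigned char *a = (unsigned char *)(((uintptr_t)d | 7) + 1);
		store8(d, v);
		for (; a + 64 <= end; a += 64)
			for (int i = 0; i < 64; i += 8)
				store8(a + i, v);
		for (int i = 0; i < 64; i += 8)	/* overlapping 64-byte tail */
			store8(end - 64 + i, v);
	} else if (n >= 64) {	/* 64..128 bytes */
		for (int i = 0; i < 64; i += 8) {
			store8(d + i, v);
			store8(end - 64 + i, v);
		}
	} else if (n >= 32) {	/* 32..63 bytes */
		for (int i = 0; i < 32; i += 8) {
			store8(d + i, v);
			store8(end - 32 + i, v);
		}
	} else if (n >= 16) {	/* 16..31 bytes */
		store8(d, v); store8(d + 8, v);
		store8(end - 16, v); store8(end - 8, v);
	} else if (n >= 8) {	/* 8..15 bytes */
		store8(d, v);
		store8(end - 8, v);
	} else if (n >= 4) {
		uint32_t w = (uint32_t)v;
		memcpy(d, &w, 4); memcpy(end - 4, &w, 4);
	} else if (n >= 2) {
		uint16_t h = (uint16_t)v;
		memcpy(d, &h, 2); memcpy(end - 2, &h, 2);
	} else if (n == 1) {
		*d = (unsigned char)c;
	}
	return dst;
}

The point of the overlapping head/tail stores is that every size class is
covered by a fixed, branch-free sequence, so the hard-to-predict tail
loops of the old code disappear.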
The test suite is appended.
After untarring it, please run the ./test command.
Thanks
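P.S. In case the tarball is inconvenient, the core of such a measurement
is roughly the following (a hypothetical minimal sketch, not the attached
suite; the buffer size, fill value, sizes, and iteration count are
arbitrary choices):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <x86intrin.h>	/* __rdtsc */

unsigned char buf[4096];	/* global so the stores are not optimized away */

/* Average TSC cycles for one memset() call of the given length. */
static double time_memset(size_t len, long iters)
{
	uint64_t start = __rdtsc();

	for (long i = 0; i < iters; i++) {
		memset(buf, 0x5a, len);
		/* compiler barrier: keep every iteration's stores */
		asm volatile("" : : "r" (buf) : "memory");
	}
	return (double)(__rdtsc() - start) / (double)iters;
}

int main(void)
{
	static const size_t sizes[] = { 7, 16, 64, 256, 1024, 2048 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5zu bytes: %8.1f cycles/call\n",
		       sizes[i], time_memset(sizes[i], 1000000));
	return 0;
}

To compare implementations one would link each memset variant in turn and
run the same sizes, which is how tables like the ones above can be
produced.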
2014-04-07 22:50 GMT+08:00, [email protected]
<[email protected]>:
> [original patch quoted in full; snipped]
[email protected] writes:
> From: Ling Ma <[email protected]>
>
> In this patch we reduce branch mispredictions by avoiding branch
> instructions and by forcing the destination to be aligned, using general
> 64-bit instructions.
> The comparison results below show we improve performance by up to 1.8x
> (we modified the test suite from Ondra; it will be sent after this patch.)
You didn't specify the CPU?
I assume it's some Atom, as nothing else uses these open coded functions
anymore?
-Andi
--
[email protected] -- Speaking for myself only
Andi,
Below are the comparison results on an older machine (CPU info is attached).
They show the new code performs better, by up to 1.6x.
Bytes: ORG_TIME: NEW_TIME: ORG vs NEW:
7 0.87 0.76 1.14
16 0.99 0.68 1.45
18 1.07 0.77 1.38
21 1.09 0.78 1.39
25 1.11 0.77 1.44
30 1.12 0.73 1.53
36 1.15 0.75 1.53
38 1.12 0.75 1.49
62 1.18 0.77 1.53
75 1.25 0.79 1.58
85 1.28 0.80 1.60
120 1.33 0.82 1.62
193 1.45 0.88 1.64
245 1.48 0.96 1.54
256 1.45 0.90 1.61
356 1.61 1.02 1.57
601 1.78 1.22 1.45
958 2.04 1.47 1.38
1024 2.07 1.48 1.39
2048 2.80 2.21 1.26
Thanks
Ling
2014-04-08 0:42 GMT+08:00, Andi Kleen <[email protected]>:
> [Andi's reply quoted in full; snipped]
Any further comments?
Thanks
Ling
2014-04-08 22:00 GMT+08:00, Ling Ma <[email protected]>:
> [previous message and results table quoted in full; snipped]
On Sun, Apr 13, 2014 at 11:11:59PM +0800, Ling Ma wrote:
> Any further comments ?
It would be good to test on some more machines to make sure you don't
cause regressions.
But I'm not aware of any workload that is doing a lot of memset (not
counting clear_page). Copies likely matter much more.
-Andi
In the kernel 3.14 source, memcpy and memset occur 19622 and 14189
times respectively, so memset is still important for us, correct?
Thanks
Ling
2014-04-14 6:03 GMT+08:00, Andi Kleen <[email protected]>:
> [Andi's reply quoted in full; snipped]
On Mon, Apr 14, 2014 at 09:31:16PM +0800, Ling Ma wrote:
> The kernel version 3.14 shows memcpy, memset occur 19622 and 14189
> times respectively.
> so memset is still important for us, correct?
Did you ever see it in a profile log as being hot?
I haven't. Static counts don't mean much.
-Andi