
Commit 2d9ee32

justdoitqdmpe authored and committed
powerpc/64: Align bytes before fall back to .Lshort in powerpc64 memcmp()
Currently the 64-byte version of memcmp() on powerpc falls back to .Lshort (byte-by-byte compare mode) if either the src or dst address is not 8-byte aligned. This can be optimized in two situations:

1) If both addresses have the same offset from an 8-byte boundary: memcmp() can first compare the unaligned bytes up to the 8-byte boundary and then compare the remaining 8-byte-aligned content in .Llong mode.

2) If the src/dst addresses do not have the same offset from an 8-byte boundary: memcmp() can align the src address to 8 bytes, adjust the dst address accordingly, then load src with aligned loads and dst with unaligned loads.

This patch optimizes memcmp() behaviour in the above two situations.

Tested with both little and big endian. The performance results below are based on little endian.

The following is the test result for the case where src/dst have the same offset (a similar result was observed when src/dst have different offsets):

(1) 256 bytes
Test with the existing tools/testing/selftests/powerpc/stringloops/memcmp:
- without patch
	29.773018302 seconds time elapsed ( +- 0.09% )
- with patch
	16.485568173 seconds time elapsed ( +- 0.02% )
	-> there is ~80% improvement

(2) 32 bytes
To observe the performance impact on < 32 bytes, modify tools/testing/selftests/powerpc/stringloops/memcmp.c as follows:
-------
 #include <string.h>
 #include "utils.h"

-#define SIZE 256
+#define SIZE 32
 #define ITERATIONS 10000

 int test_memcmp(const void *s1, const void *s2, size_t n);
-------
- without patch
	0.244746482 seconds time elapsed ( +- 0.36% )
- with patch
	0.215069477 seconds time elapsed ( +- 0.51% )
	-> there is ~13% improvement

(3) 0~8 bytes
To observe the < 8 bytes performance impact, modify tools/testing/selftests/powerpc/stringloops/memcmp.c as follows:
-------
 #include <string.h>
 #include "utils.h"

-#define SIZE 256
-#define ITERATIONS 10000
+#define SIZE 8
+#define ITERATIONS 1000000

 int test_memcmp(const void *s1, const void *s2, size_t n);
-------
- without patch
	1.845642503 seconds time elapsed ( +- 0.12% )
- with patch
	1.849767135 seconds time elapsed ( +- 0.26% )
	-> they are nearly the same (-0.2%)

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
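As a rough illustration of the flow described above, the C sketch below models the same-offset path (situation 1) together with the aligned tail handling. It is not the kernel code: the names load_dw() and cmp_sameoffset_sketch() are made up for this note, and, like the assembly, it deliberately reads whole doublewords that may begin up to 7 bytes before the buffer or end up to 7 bytes after it (safe in the kernel because the extra bytes stay inside the same 8-byte-aligned doubleword and hence the same page, but out of bounds in portable C).
-------
#include <stddef.h>
#include <stdint.h>

/* Model of the LD macro (ldbrx/ldx): load 8 bytes so the byte at the
 * lowest address is the most significant, which makes an unsigned
 * compare order bytes the same way memcmp() does. */
static uint64_t load_dw(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Illustrative sketch only.  Assumes n >= 8 and
 * ((uintptr_t)s1 & 7) == ((uintptr_t)s2 & 7), which is what the
 * assembly checks before taking this path. */
static int cmp_sameoffset_sketch(const unsigned char *s1,
				 const unsigned char *s2, size_t n)
{
	size_t off = (uintptr_t)s1 & 7;

	if (off) {
		/* Leading bytes: load the doubleword containing the start
		 * of each buffer and shift out the bytes that precede it
		 * (the sld rA,rA,r6 / sld rB,rB,r6 pair in the patch). */
		uint64_t a = load_dw(s1 - off) << (off * 8);
		uint64_t b = load_dw(s2 - off) << (off * 8);

		if (a != b)
			return a > b ? 1 : -1;
		s1 += 8 - off;
		s2 += 8 - off;
		n  -= 8 - off;
		if (n == 0)
			return 0;
	}

	/* Both pointers are now 8-byte aligned: compare a doubleword at a
	 * time (the .Llong / 8~31-byte loops in the patch). */
	while (n >= 8) {
		uint64_t a = load_dw(s1), b = load_dw(s2);

		if (a != b)
			return a > b ? 1 : -1;
		s1 += 8;
		s2 += 8;
		n  -= 8;
	}

	if (n) {
		/* Tail of 1~7 bytes: like .Lcmp_rest_lt8bytes, load a whole
		 * doubleword and shift the trailing bytes away. */
		uint64_t a = load_dw(s1) >> ((8 - n) * 8);
		uint64_t b = load_dw(s2) >> ((8 - n) * 8);

		if (a != b)
			return a > b ? 1 : -1;
	}
	return 0;
}
-------
The a > b ? 1 : -1 expressions mirror the result convention the patch uses in .LcmpAB_lightweight, which returns 1 or -1 from an unsigned doubleword comparison.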
1 parent ca42d8d commit 2d9ee32

File tree

1 file changed: +133 -7 lines changed

arch/powerpc/lib/memcmp_64.S

Lines changed: 133 additions & 7 deletions
@@ -24,28 +24,41 @@
 #define rH	r31
 
 #ifdef __LITTLE_ENDIAN__
+#define LH	lhbrx
+#define LW	lwbrx
 #define LD	ldbrx
 #else
+#define LH	lhzx
+#define LW	lwzx
 #define LD	ldx
 #endif
 
+/*
+ * There are 2 categories for memcmp:
+ * 1) src/dst has the same offset to the 8 bytes boundary. The handlers
+ * are named like .Lsameoffset_xxxx
+ * 2) src/dst has different offset to the 8 bytes boundary. The handlers
+ * are named like .Ldiffoffset_xxxx
+ */
 _GLOBAL(memcmp)
 	cmpdi	cr1,r5,0
 
-	/* Use the short loop if both strings are not 8B aligned */
-	or	r6,r3,r4
+	/* Use the short loop if the src/dst addresses are not
+	 * with the same offset of 8 bytes align boundary.
+	 */
+	xor	r6,r3,r4
 	andi.	r6,r6,7
 
-	/* Use the short loop if length is less than 32B */
-	cmpdi	cr6,r5,31
+	/* Fall back to short loop if compare at aligned addrs
+	 * with less than 8 bytes.
+	 */
+	cmpdi	cr6,r5,7
 
 	beq	cr1,.Lzero
-	bne	.Lshort
-	bgt	cr6,.Llong
+	bgt	cr6,.Lno_short
 
 .Lshort:
 	mtctr	r5
-
 1:	lbz	rA,0(r3)
 	lbz	rB,0(r4)
 	subf.	rC,rB,rA
@@ -78,11 +91,89 @@ _GLOBAL(memcmp)
 	li	r3,0
 	blr
 
+.Lno_short:
+	dcbt	0,r3
+	dcbt	0,r4
+	bne	.Ldiffoffset_8bytes_make_align_start
+
+
+.Lsameoffset_8bytes_make_align_start:
+	/* attempt to compare bytes not aligned with 8 bytes so that
+	 * rest comparison can run based on 8 bytes alignment.
+	 */
+	andi.	r6,r3,7
+
+	/* Try to compare the first double word which is not 8 bytes aligned:
+	 * load the first double word at (src & ~7UL) and shift left appropriate
+	 * bits before comparision.
+	 */
+	rlwinm	r6,r3,3,26,28
+	beq	.Lsameoffset_8bytes_aligned
+	clrrdi	r3,r3,3
+	clrrdi	r4,r4,3
+	LD	rA,0,r3
+	LD	rB,0,r4
+	sld	rA,rA,r6
+	sld	rB,rB,r6
+	cmpld	cr0,rA,rB
+	srwi	r6,r6,3
+	bne	cr0,.LcmpAB_lightweight
+	subfic	r6,r6,8
+	subf.	r5,r6,r5
+	addi	r3,r3,8
+	addi	r4,r4,8
+	beq	.Lzero
+
+.Lsameoffset_8bytes_aligned:
+	/* now we are aligned with 8 bytes.
+	 * Use .Llong loop if left cmp bytes are equal or greater than 32B.
+	 */
+	cmpdi	cr6,r5,31
+	bgt	cr6,.Llong
+
+.Lcmp_lt32bytes:
+	/* compare 1 ~ 32 bytes, at least r3 addr is 8 bytes aligned now */
+	cmpdi	cr5,r5,7
+	srdi	r0,r5,3
+	ble	cr5,.Lcmp_rest_lt8bytes
+
+	/* handle 8 ~ 31 bytes */
+	clrldi	r5,r5,61
+	mtctr	r0
+2:
+	LD	rA,0,r3
+	LD	rB,0,r4
+	cmpld	cr0,rA,rB
+	addi	r3,r3,8
+	addi	r4,r4,8
+	bne	cr0,.LcmpAB_lightweight
+	bdnz	2b
+
+	cmpwi	r5,0
+	beq	.Lzero
+
+.Lcmp_rest_lt8bytes:
+	/* Here we have only less than 8 bytes to compare with. at least s1
+	 * Address is aligned with 8 bytes.
+	 * The next double words are load and shift right with appropriate
+	 * bits.
+	 */
+	subfic	r6,r5,8
+	slwi	r6,r6,3
+	LD	rA,0,r3
+	LD	rB,0,r4
+	srd	rA,rA,r6
+	srd	rB,rB,r6
+	cmpld	cr0,rA,rB
+	bne	cr0,.LcmpAB_lightweight
+	b	.Lzero
+
 .Lnon_zero:
 	mr	r3,rC
 	blr
 
 .Llong:
+	/* At least s1 addr is aligned with 8 bytes */
 	li	off8,8
 	li	off16,16
 	li	off24,24
@@ -232,4 +323,39 @@ _GLOBAL(memcmp)
 	ld	r28,-32(r1)
 	ld	r27,-40(r1)
 	blr
+
+.LcmpAB_lightweight:	/* skip NV GPRS restore */
+	li	r3,1
+	bgtlr
+	li	r3,-1
+	blr
+
+.Ldiffoffset_8bytes_make_align_start:
+	/* now try to align s1 with 8 bytes */
+	rlwinm	r6,r3,3,26,28
+	beq	.Ldiffoffset_align_s1_8bytes
+
+	clrrdi	r3,r3,3
+	LD	rA,0,r3
+	LD	rB,0,r4	/* unaligned load */
+	sld	rA,rA,r6
+	srd	rA,rA,r6
+	srd	rB,rB,r6
+	cmpld	cr0,rA,rB
+	srwi	r6,r6,3
+	bne	cr0,.LcmpAB_lightweight
+
+	subfic	r6,r6,8
+	subf.	r5,r6,r5
+	addi	r3,r3,8
+	add	r4,r4,r6
+
+	beq	.Lzero
+
+.Ldiffoffset_align_s1_8bytes:
+	/* now s1 is aligned with 8 bytes. */
+	cmpdi	cr5,r5,31
+	ble	cr5,.Lcmp_lt32bytes
+	b	.Llong
+
 EXPORT_SYMBOL(memcmp)
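For the different-offset case (situation 2 in the commit message), the .Ldiffoffset_8bytes_make_align_start block above aligns only s1 and keeps loading s2 with unaligned doubleword loads, on the expectation that hardware-handled unaligned LD loads are still cheaper than the byte-at-a-time .Lshort loop. Below is a hedged C sketch of just its leading-byte comparison, reusing the hypothetical load_dw() helper from the earlier sketch and assuming n >= 8 with a nonzero offset, as the assembly guarantees; like the assembly, it reads up to 7 bytes before s1 within the same aligned doubleword.
-------
#include <stdint.h>

/* load_dw(): as defined in the earlier sketch (first memory byte ends up
 * most significant, so unsigned compares order bytes like memcmp()). */
static uint64_t load_dw(const unsigned char *p);

/* Sketch of the leading-byte compare in .Ldiffoffset_8bytes_make_align_start:
 * off = (uintptr_t)s1 & 7, known to be nonzero, with n >= 8.  Returns the
 * memcmp()-style result for the first 8 - off bytes; on equality the caller
 * advances both pointers by 8 - off bytes, after which s1 is 8-byte aligned
 * and the remaining length selects .Lcmp_lt32bytes or .Llong. */
static int cmp_diffoffset_lead(const unsigned char *s1,
			       const unsigned char *s2, unsigned int off)
{
	/* Doubleword containing s1, with the bytes before s1 masked off
	 * (the sld-then-srd by off*8 bits in the patch). */
	uint64_t a = load_dw(s1 - off) & (~0ULL >> (off * 8));

	/* Unaligned doubleword at s2, shifted right so only its first
	 * 8 - off bytes remain, lined up with the bytes kept in 'a'. */
	uint64_t b = load_dw(s2) >> (off * 8);

	if (a != b)
		return a > b ? 1 : -1;
	return 0;
}
-------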
