/* Optimized 64-bit memset implementation for POWER6.
   Copyright (C) 1997-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
   The memset is done in three sizes: byte (8 bits), word (32 bits),
   and 32-byte sectors (256 bits).  There is a special case for setting
   whole cache lines to 0, to take advantage of the dcbz instruction.  */
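
/* Rough C-level sketch of the approach, for orientation only (the code
   below does not follow this control flow literally):

       unsigned char *p = s;
       while (n > 0 && ((unsigned long) p & 7) != 0)
         { *p++ = c; --n; }                       (bytes, up to a doubleword)
       unsigned long v = (unsigned char) c;
       v |= v << 8;  v |= v << 16;  v |= v << 32; (fill byte in every lane)

   then v is stored in doublewords and 32-byte sectors; when c is 0 and a
   full 128-byte cache line remains, it is cleared with dcbz; word and byte
   stores finish the tail.  */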
EALIGN (BP_SYM (memset), 7, 0)

#define rRTN	r3	/* Initial value of 1st argument.  */
#if __BOUNDED_POINTERS__
# define rMEMP0	r4	/* Original value of 1st arg.  */
# define rCHR	r5	/* Char to set in each byte.  */
# define rLEN	r6	/* Length of region to set.  */
# define rMEMP	r10	/* Address at which we are storing.  */
#else
# define rMEMP0	r3	/* Original value of 1st arg.  */
# define rCHR	r4	/* Char to set in each byte.  */
# define rLEN	r5	/* Length of region to set.  */
# define rMEMP	r6	/* Address at which we are storing.  */
#endif
#define rALIGN	r7	/* Number of bytes we are setting now (when aligning).  */
#define rMEMP3	r9	/* Alt mem pointer.  */

#if __BOUNDED_POINTERS__
	CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
	STORE_RETURN_VALUE (rMEMP0)
	STORE_RETURN_BOUNDS (rTMP, rTMP2)

/* Take care of case for size <= 4.  */
	andi.	rALIGN, rMEMP0, 7

/* Align to doubleword boundary.  */
	rlwimi	rCHR, rCHR, 8, 16, 23	/* Replicate byte to halfword.  */
	subfic	rALIGN, rALIGN, 8
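/* rALIGN is now 8 - (address & 7): the number of bytes needed to reach
   the next doubleword boundary.  */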
	cror	28, 30, 31		/* Detect odd word aligned.  */
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	rlwimi	rCHR, rCHR, 16, 0, 15	/* Replicate halfword to word.  */

/* Process the even word of doubleword.  */
/* Process the odd word of doubleword.  */
	bf	28, L(g4x)	/* If false, word aligned on odd word.  */

/* Handle the case of size < 31.  */
	rlwimi	rCHR, rCHR, 16, 0, 15	/* Replicate halfword to word.  */

/* Align to 32-byte boundary.  */
	andi.	rALIGN, rMEMP, 0x18
	subfic	rALIGN, rALIGN, 0x20
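/* rALIGN is now 32 - (address & 0x18): the distance, in bytes (a multiple
   of 8, since the pointer is already doubleword aligned), to the next
   32-byte boundary.  */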
	insrdi	rCHR, rCHR, 32, 0	/* Replicate word to double word.  */
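/* rCHR now holds the fill byte replicated into all eight byte lanes, the
   equivalent of: v = (unsigned char) c; v |= v << 8; v |= v << 16;
   v |= v << 32;  */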
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	cmplwi	cr1, rALIGN, 0x10
	stdu	rCHR, -8(rMEMP2)
L(a1):	blt	cr1, L(a2)
	stdu	rCHR, -16(rMEMP2)

/* Now aligned to a 32 byte boundary.  */
	clrrdi.	rALIGN, rLEN, 5
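/* rALIGN is rLEN with the low five bits cleared: the length rounded down
   to a whole number of 32-byte sectors.  */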
	beq	cr1, L(zloopstart)	/* Special case for clearing memory using dcbz.  */
	beq	L(medium)		/* We may not actually get to do a full line.  */

/* Storing a non-zero "c" value.  We are aligned at a sector (32-byte)
   boundary but may not be at a cache line (128-byte) boundary.  */
/* memset in 32-byte chunks until we get to a cache line boundary.
   If rLEN is less than the distance to the next cache-line boundary use
   cacheAligned1 code to finish the tail.  */
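/* Roughly: the distance to the next cache-line boundary is
   128 - (address & 127); if rLEN is smaller than that, the 32-byte stores
   in cacheAligned1 finish the job before a full line is ever reached.  */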
	blt	cr1, L(cacheAligned1)
	beq	L(nzCacheAligned)
	andi.	rTMP, rMEMP3, 127
	beq	L(nzCacheAligned)
	beq	L(nzCacheAligned)
	blt	cr1, L(cacheAligned1)
	b	L(nzCacheAligned128)

/* Now we are aligned to the cache line and can use dcbtst.  */
	blt	cr1, L(cacheAligned1)
	b	L(nzCacheAligned128)

L(nzCacheAligned128):
	bge	cr1, L(nzCacheAligned128)

/* Storing a zero "c" value.  We are aligned at a sector (32-byte)
   boundary but may not be at cache line (128-byte) boundary.  If the
   remaining length spans a full cache line we can use the Data cache
   block zero instruction.  */
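
/* dcbz (Data Cache Block set to Zero) clears an entire 128-byte cache block
   in place without first reading it from memory.  Roughly, once the pointer
   is cache-line aligned the zero path behaves like:

       while (n >= 128)
         { dcbz_at (p); p += 128; n -= 128; }

   where dcbz_at stands for issuing dcbz on the line containing p (a
   descriptive sketch, not literal C).  */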
/* memset in 32-byte chunks until we get to a cache line boundary.
   If rLEN is less than the distance to the next cache-line boundary use
   cacheAligned1 code to finish the tail.  */
	blt	cr1, L(cacheAligned1)
	andi.	rTMP, rMEMP3, 127
	blt	cr1, L(cacheAligned1)
	blt	cr6, L(cacheAligned128)

/* Now we are aligned to the cache line and can use dcbz.  */
	blt	cr1, L(cacheAligned1)
	blt	cr6, L(cacheAligned128)
	bgt	cr5, L(cacheAligned512)
	blt	cr1, L(cacheAligned1)
	blt	cr6, L(cacheAligned128)

/* A simple loop for the longer (>640 byte) lengths.  This form limits
   the number of mispredicted branches to exactly one, at loop exit.  */
	blt	cr1, L(cacheAligned1)
	bge	cr6, L(cacheAligned256)
	blt	cr1, L(cacheAligned1)
	blt	cr1, L(handletail32)
	blt	cr1, L(handletail32)
	blt	cr1, L(handletail32)

/* We are here because the length or remainder (rLEN) is less than the
   cache line/sector size and does not justify aggressive loop unrolling.
   So set up the preconditions for L(medium) and go there.  */

/* Memset of 8 bytes or less.  */

/* Memset of 0-31 bytes.  */
	insrdi	rCHR, rCHR, 32, 0	/* Replicate word to double word.  */
	add	rMEMP, rMEMP, rLEN
	bt-	31, L(medium_31t)
	bt-	30, L(medium_30t)
	bge	cr1, L(medium_27t)
	bf-	30, L(medium_30f)
	bf-	29, L(medium_29f)
	blt	cr1, L(medium_27f)
	stdu	rCHR, -16(rMEMP)
END_GEN_TB (BP_SYM (memset), TB_TOCLESS)
libc_hidden_builtin_def (memset)

/* Copied from bzero.S to prevent the linker from inserting a stub
   between bzero and memset.  */
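/* In C terms, __bzero (s, n) behaves like memset (s, 0, n); the entry below
   just remaps the arguments (length into memset's length register, fill
   value forced to zero) before reusing the memset body.  */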
ENTRY (BP_SYM (__bzero))
#if __BOUNDED_POINTERS__
	/* Tell memset that we don't want a return value.  */
END_GEN_TB (BP_SYM (__bzero), TB_TOCLESS)

weak_alias (BP_SYM (__bzero), BP_SYM (bzero))