1 /* Optimized 64-bit memset implementation for POWER6.
2 Copyright (C) 1997, 1999, 2000, 2002, 2003, 2007
3 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, write to the Free
18 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
25 /* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
28 The memset is done in three sizes: byte (8 bits), word (32 bits),
29 cache line (256 bits). There is a special case for setting cache lines
30 to 0, to take advantage of the dcbz instruction. */
33 EALIGN (BP_SYM (memset), 7, 0)
/* NOTE(review): this listing is an excerpt -- the embedded source line
   numbers jump (e.g. 63 -> 67, 177 -> 198), so many instructions of the
   original memset.S are missing here.  Comments added below describe only
   what the visible instructions establish; anything that depends on the
   omitted lines is marked as an assumption to verify.  */

/* Register aliases.  Under __BOUNDED_POINTERS__ the user arguments arrive
   one register later (r4..r6, with rMEMP in r10); otherwise they are in
   the usual r3..r5 with rMEMP in r6.  */
37 #define rRTN r3 /* Initial value of 1st argument. */
38 #if __BOUNDED_POINTERS__
39 # define rMEMP0 r4 /* Original value of 1st arg. */
40 # define rCHR r5 /* Char to set in each byte. */
41 # define rLEN r6 /* Length of region to set. */
42 # define rMEMP r10 /* Address at which we are storing. */
44 # define rMEMP0 r3 /* Original value of 1st arg. */
45 # define rCHR r4 /* Char to set in each byte. */
46 # define rLEN r5 /* Length of region to set. */
47 # define rMEMP r6 /* Address at which we are storing. */
49 #define rALIGN r7 /* Number of bytes we are setting now (when aligning). */
51 #define rMEMP3 r9 /* Alt mem pointer. */

/* Bounded-pointer prologue: check the [rMEMP0, rMEMP0+rLEN) region and
   record the return value/bounds before the stores begin.  */
53 #if __BOUNDED_POINTERS__
55 CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
57 STORE_RETURN_VALUE (rMEMP0)
58 STORE_RETURN_BOUNDS (rTMP, rTMP2)

61 /* Take care of case for size <= 4. */
63 andi. rALIGN, rMEMP0, 7 /* Low 3 bits: offset past a doubleword boundary.  */

67 /* Align to doubleword boundary. */
69 rlwimi rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword. */
72 subfic rALIGN, rALIGN, 8 /* rALIGN = 8 - offset = bytes to next doubleword.  */
73 cror 28,30,31 /* Detect odd word aligned. */
74 add rMEMP, rMEMP, rALIGN
75 sub rLEN, rLEN, rALIGN
76 rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */

78 /* Process the even word of doubleword. */
87 /* Process the odd word of doubleword. */
89 bf 28, L(g4x) /* If false, word aligned on odd word. */

96 /* Handle the case of size < 31. */
98 rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */

102 /* Align to 32-byte boundary. */
103 andi. rALIGN, rMEMP, 0x18 /* Bits 3-4: offset within the 32-byte sector.  */
104 subfic rALIGN, rALIGN, 0x20 /* Bytes needed to reach the next 32-byte boundary.  */
105 insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
108 add rMEMP, rMEMP, rALIGN
109 sub rLEN, rLEN, rALIGN
110 cmplwi cr1, rALIGN, 0x10
/* NOTE(review): rMEMP2's setup is in lines omitted from this excerpt --
   presumably a copy of the post-alignment pointer used for the stdu
   pre-decrement stores below; verify against the full source.  */
113 stdu rCHR, -8(rMEMP2)
114 L(a1): blt cr1, L(a2)
116 stdu rCHR, -16(rMEMP2)

119 /* Now aligned to a 32 byte boundary. */
123 clrrdi. rALIGN, rLEN, 5 /* rALIGN = rLEN rounded down to a multiple of 32.  */
125 beq cr1, L(zloopstart) /* Special case for clearing memory using dcbz. */
126 beq L(medium) /* We may not actually get to do a full line. */

128 /* Storing a non-zero "c" value. We are aligned at a sector (32-byte)
129 boundary but may not be at cache line (128-byte) boundary. */
131 /* memset in 32-byte chunks until we get to a cache line boundary.
132 If rLEN is less than the distance to the next cache-line boundary use
133 cacheAligned1 code to finish the tail. */
137 blt cr1,L(cacheAligned1)
139 beq L(nzCacheAligned)
145 andi. rTMP,rMEMP3,127 /* Zero iff rMEMP3 is 128-byte (cache-line) aligned.  */
148 beq L(nzCacheAligned)
157 beq L(nzCacheAligned)
167 blt cr1,L(cacheAligned1)
168 b L(nzCacheAligned128)

170 /* Now we are aligned to the cache line and can use dcbtst. */
174 blt cr1,L(cacheAligned1)
175 b L(nzCacheAligned128)

/* Cache-line-sized store loop for the non-zero fill value; iterates while
   cr1 says at least another full pass remains (see the bge below).  */
177 L(nzCacheAligned128):
198 bge cr1,L(nzCacheAligned128)

202 /* Storing a zero "c" value. We are aligned at a sector (32-byte)
203 boundary but may not be at cache line (128-byte) boundary. If the
204 remaining length spans a full cache line we can use the Data cache
205 block zero instruction. */
207 /* memset in 32-byte chunks until we get to a cache line boundary.
208 If rLEN is less than the distance to the next cache-line boundary use
209 cacheAligned1 code to finish the tail. */
215 blt cr1,L(cacheAligned1)
223 andi. rTMP,rMEMP3,127 /* Zero iff rMEMP3 is 128-byte (cache-line) aligned.  */
245 blt cr1,L(cacheAligned1)
246 blt cr6,L(cacheAligned128)

249 /* Now we are aligned to the cache line and can use dcbz. */
254 blt cr1,L(cacheAligned1)
258 blt cr6,L(cacheAligned128)
259 bgt cr5,L(cacheAligned512)
266 blt cr1,L(cacheAligned1)
267 blt cr6,L(cacheAligned128)

270 /* A simple loop for the longer (>640 bytes) lengths. This form limits
271 the branch miss-predicted to exactly 1 at loop exit.*/
274 blt cr1,L(cacheAligned1)
290 bge cr6,L(cacheAligned256)
292 blt cr1,L(cacheAligned1)
301 blt cr1,L(handletail32)
311 blt cr1,L(handletail32)
321 blt cr1,L(handletail32)

329 /* We are here because the length or remainder (rLEN) is less than the
330 cache line/sector size and does not justify aggressive loop unrolling.
331 So set up the preconditions for L(medium) and go there. */

340 /* Memset of 8 bytes or less. */

363 /* Memset of 0-31 bytes. */
366 insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
369 add rMEMP, rMEMP, rLEN /* Point one past the end; stores use negative offsets.  */
/* NOTE(review): the bt/bf tests below read CR bits 28-31, presumably loaded
   from rLEN's low bits by an mtcrf in the omitted lines -- confirm against
   the full source before trusting the bit->length mapping.  */
371 bt- 31, L(medium_31t)
372 bt- 30, L(medium_30t)
376 bge cr1, L(medium_27t)
383 bf- 30, L(medium_30f)
386 bf- 29, L(medium_29f)
389 blt cr1, L(medium_27f)
392 stdu rCHR, -16(rMEMP)
398 END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
399 libc_hidden_builtin_def (memset)
401 /* Copied from bzero.S to prevent the linker from inserting a stub
402 between bzero and memset. */
403 ENTRY (BP_SYM (__bzero))
/* NOTE(review): the body of this stub is omitted from the excerpt;
   presumably it rearranges the bzero(s, n) arguments into memset's
   (s, 0, n) form and falls through/branches into memset above --
   verify against the full source.  */
405 #if __BOUNDED_POINTERS__
409 /* Tell memset that we don't want a return value. */
417 END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)
419 weak_alias (BP_SYM (__bzero), BP_SYM (bzero))