1 /* Optimized 32-bit memset implementation for POWER6.
2 Copyright (C) 1997,99,2000,02,03,06,2007,2009 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
23 /* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
26 The memset is done in three sizes: byte (8 bits), word (32 bits),
27 cache line (1024 bits). There is a special case for setting cache lines
28 to 0, to take advantage of the dcbz instruction. */
/* NOTE(review): this view of the file is heavily elided -- the embedded
   source line numbers (31, 35, 48, ...) jump, so many instructions between
   the visible lines are missing here.  Comments below describe only what
   the visible lines establish; registers and labels first referenced in
   elided lines are flagged, not guessed at.  */
31 EALIGN (BP_SYM (memset), 7, 0)
/* Register roles.  rRTN aliases rMEMP0 (both r3), so the original
   destination pointer is naturally returned unchanged; rMEMP (r6) is the
   running store pointer.  */
35 #define rRTN r3 /* Initial value of 1st argument. */
36 #define rMEMP0 r3 /* Original value of 1st arg. */
37 #define rCHR r4 /* Char to set in each byte. */
38 #define rLEN r5 /* Length of region to set. */
39 #define rMEMP r6 /* Address at which we are storing. */
40 #define rALIGN r7 /* Number of bytes we are setting now (when aligning). */
43 #define rNEG64 r8 /* Constant -64 for clearing with dcbz. */
44 #define rMEMP3 r9 /* Alt mem pointer. */
46 /* Take care of case for size <= 4. */
/* Low 2 bits of the destination give the distance past a word boundary
   (record form: sets CR0 for a branch in an elided line).  */
48 andi. rALIGN, rMEMP0, 3
51 /* Align to word boundary. */
53 rlwimi rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword. */
/* rALIGN = 4 - (addr & 3): bytes needed to reach the next word
   boundary; advance the pointer and shrink the length by that much.  */
56 subfic rALIGN, rALIGN, 4
57 add rMEMP, rMEMP, rALIGN
58 sub rLEN, rLEN, rALIGN
66 /* Handle the case of size < 31. */
69 rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */
71 /* Align to 32-byte boundary. */
/* rALIGN = 0x20 - (addr & 0x1C): distance (in words) to the next
   32-byte sector boundary; cr1 records whether that distance is 0x10,
   used by the alignment-store branches below.  */
72 andi. rALIGN, rMEMP, 0x1C
73 subfic rALIGN, rALIGN, 0x20
76 add rMEMP, rMEMP, rALIGN
77 sub rLEN, rLEN, rALIGN
78 cmplwi cr1, rALIGN, 0x10
/* NOTE(review): rMEMP2 is set up in elided lines -- not visible here.  */
88 stwu rCHR, -16(rMEMP2)
89 L(a2): bf 29, L(caligned)
93 /* Now aligned to a 32 byte boundary. */
/* rALIGN = rLEN with the low 5 bits cleared: the byte count of whole
   32-byte chunks; the record form lets the beq below skip to L(medium)
   when no full chunk remains.  */
96 clrrwi. rALIGN, rLEN, 5
98 beq cr1, L(zloopstart) /* Special case for clearing memory using dcbz. */
100 beq L(medium) /* We may not actually get to do a full line. */
102 /* Storing a non-zero "c" value. We are aligned at a sector (32-byte)
103 boundary but may not be at cache line (128-byte) boundary. */
105 /* memset in 32-byte chunks until we get to a cache line boundary.
106 If rLEN is less than the distance to the next cache-line boundary use
107 cacheAligned1 code to finish the tail. */
111 blt cr1,L(cacheAligned1)
113 beq L(nzCacheAligned)
/* NOTE(review): rTMP is defined in elided lines.  127 masks the offset
   within a 128-byte cache line, checking rMEMP3 for line alignment.  */
122 andi. rTMP,rMEMP3,127
126 beq L(nzCacheAligned)
139 beq L(nzCacheAligned)
141 /* At this point we can overrun the store queue (pipe reject) so it is
142 time to slow things down. The store queue can merge two adjacent
143 stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
144 So we add "group ending nops" to guarantee that we dispatch only two
145 stores every other cycle. */
164 blt cr1,L(cacheAligned1)
167 /* Now we are aligned to the cache line and can use dcbtst. */
172 blt cr1,L(cacheAligned1)
173 blt cr6,L(nzCacheAligned128)
175 L(nzCacheAligned128):
196 /* At this point we can overrun the store queue (pipe reject) so it is
197 time to slow things down. The store queue can merge two adjacent
198 stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
199 So we add "group ending nops" to guarantee that we dispatch only one
232 blt cr6,L(cacheAligned1)
236 b L(nzCacheAligned256)
238 L(nzCacheAligned256):
242 /* When we are not in libc we should use only GPRs to avoid the FPU lock
279 /* We are in libc and this is a long memset so we can use FPRs and can afford
280 occasional FPU locked interrupts. */
300 bge cr1,L(nzCacheAligned256)
305 /* Storing a zero "c" value. We are aligned at a sector (32-byte)
306 boundary but may not be at cache line (128-byte) boundary. If the
307 remaining length spans a full cache line we can use the Data cache
308 block zero instruction. */
310 /* memset in 32-byte chunks until we get to a cache line boundary.
311 If rLEN is less than the distance to the next cache-line boundary use
312 cacheAligned1 code to finish the tail. */
317 blt cr1,L(cacheAligned1)
/* Same 128-byte cache-line alignment probe as the non-zero path above.  */
328 andi. rTMP,rMEMP3,127
347 /* At this point we can overrun the store queue (pipe reject) so it is
348 time to slow things down. The store queue can merge two adjacent
349 stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
350 So we add "group ending nops" to guarantee that we dispatch only two
351 stores every other cycle. */
371 blt cr1,L(cacheAligned1)
372 blt cr6,L(cacheAligned128)
375 /* Now we are aligned to the cache line and can use dcbz. */
380 blt cr1,L(cacheAligned1)
384 blt cr6,L(cacheAligned128)
385 bgt cr5,L(cacheAligned512)
392 blt cr1,L(cacheAligned1)
393 blt cr6,L(cacheAligned128)
396 /* A simple loop for the longer (>640 bytes) lengths. This form limits
397 the branch mispredicted to exactly 1 at loop exit. */
400 blt cr1,L(cacheAligned1)
413 bge cr6,L(cacheAligned256)
414 blt cr1,L(cacheAligned1)
423 blt cr1,L(handletail32)
437 blt cr1,L(handletail32)
451 blt cr1,L(handletail32)
452 /* At this point we can overrun the store queue (pipe reject) so it is
453 time to slow things down. The store queue can merge two adjacent
454 stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
455 So we add "group ending nops" to guarantee that we dispatch only two
456 stores every other cycle. */
478 /* We are here because the length or remainder (rLEN) is less than the
479 cache line/sector size and does not justify aggressive loop unrolling.
480 So set up the preconditions for L(medium) and go there. */
489 /* Memset of 4 bytes or less. */
502 /* Memset of 0-31 bytes. */
/* Point rMEMP one past the end; the tail stores below work backwards
   with negative-displacement stwu/update forms.  */
507 add rMEMP, rMEMP, rLEN
/* NOTE(review): the CR bit tests (bits 27-31) decode the low bits of
   rLEN recorded by compare/shift instructions in elided lines -- each
   bit selects whether a 1/2/4/8/16-byte tail store is needed; the exact
   producing instructions are not visible in this view.  */
509 bt- 31, L(medium_31t)
510 bt- 30, L(medium_30t)
514 bge cr1, L(medium_27t)
522 bf- 30, L(medium_30f)
525 bf- 29, L(medium_29f)
528 blt cr1, L(medium_27f)
533 stwu rCHR, -16(rMEMP)
540 END (BP_SYM (memset))
541 libc_hidden_builtin_def (memset)