/*#************************************************************************#*/
/*#-------------------------------------------------------------------------*/
/*#                                                                          */
/*# FUNCTION NAME: memset()                                                  */
/*#                                                                          */
/*# PARAMETERS:  void* dst;   Destination address.                           */
/*#              int     c;   Value of byte to write.                        */
/*#              int   len;   Number of bytes to write.                      */
/*#                                                                          */
/*# RETURNS:     dst.                                                        */
/*#                                                                          */
/*# DESCRIPTION: Sets the memory dst of length len bytes to c, as standard.  */
/*#              Framework taken from memcpy.  This routine is               */
/*#              very sensitive to compiler changes in register allocation.  */
/*#              Should really be rewritten to avoid this problem.           */
/*#                                                                          */
/*#-------------------------------------------------------------------------*/
/*#                                                                          */
/*# HISTORY                                                                  */
/*#                                                                          */
/*# DATE      NAME            CHANGES                                        */
/*# ----      ----            -------                                        */
/*# 990713    HP              Tired of watching this function (or            */
/*#                           really, the nonoptimized generic               */
/*#                           implementation) take up 90% of simulator       */
/*#                           output.  Measurements needed.                  */
/*#                                                                          */
/*#-------------------------------------------------------------------------*/

#include <linux/types.h>
/* No, there's no macro saying 12*4, since it is "hard" to get it into
   the asm in a good way.  Thus better to expose the problem everywhere. */
/* Assuming 1 cycle per dword written or read (ok, not really true), and
   one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
   so n >= 45.7;  n >= 0.9;  we win on the first full 48-byte block to set. */

#define ZERO_BLOCK_SIZE (1*12*4)
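
/* 12*4 = 48 bytes: the 'movem $r11,[$r13+]' in the block-fill loop below
   stores registers r0..r11, i.e. twelve dwords, per turn, which is where
   this block size comes from. */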

void *memset(void *pdst,
             int c,
             size_t plen)
{
  /* Ok.  Now we want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this. */

  register char *return_dst __asm__ ("r10") = pdst;
  register int n __asm__ ("r12") = plen;
  register int lc __asm__ ("r11") = c;
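
  /* Presumably r10..r12 are picked because the CRIS calling convention
     passes the first three arguments in r10, r11 and r12, with r10 also
     holding the return value, so these bindings should cost no extra
     register moves. */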

  /* Most apps use memset sanely.  Only those memsetting about 3..4
     bytes or less get penalized compared to the generic implementation
     - and that's not really sane use. */

  /* Ugh.  This is fragile at best.  Check with newer GCC releases, if
     they compile cascaded "x |= x << 8" sanely! */
  __asm__("movu.b %0,$r13\n\t"
          "lslq 8,$r13\n\t"
          "move.b %0,$r13\n\t"
          "move.d $r13,%0\n\t"
          "lslq 16,$r13\n\t"
          "or.d $r13,%0"
          : "=r" (lc) : "0" (lc) : "r13");

  {
    register char *dst __asm__ ("r13") = pdst;

    /* This is NONPORTABLE, but since this whole routine is */
    /* grossly nonportable that doesn't matter. */

    if (((unsigned long) pdst & 3) != 0
        /* Oops! n=0 must be a legal call, regardless of alignment. */
        && n >= 3)
    {
      if ((unsigned long) dst & 1)
      {
        *dst = (char) lc;
        n--;
        dst++;
      }

      if ((unsigned long) dst & 2)
      {
        *(short *) dst = lc;
        n -= 2;
        dst += 2;
      }
    }
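
    /* From here on dst is dword-aligned whenever the fixup above ran; an
       unaligned set of fewer than three bytes skips it and instead relies
       on the (nonportable) unaligned stores in the tail code below. */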

    /* Now the fun part.  For the threshold value of this, check the
       equation above. */
    /* Decide which copying method to use. */
    if (n >= ZERO_BLOCK_SIZE)
    {
      /* For large copies we use 'movem'. */

      /* It is not optimal to tell the compiler about clobbering any
         registers; that will move the saving/restoring of those registers
         to the function prologue/epilogue, and make non-movem sizes
         suboptimal.

         This method is not foolproof; it assumes that the "asm reg"
         declarations at the beginning of the function really are used
         here (beware: they may be moved to temporary registers).
         This way, we do not have to save/move the registers around into
         temporaries; we can safely use them straight away.

         If you want to check that the allocation was right, then
         check the equalities in the first comment.  It should say
         "r13=r13, r12=r12, r11=r11". */
      __asm__ volatile ("
        ;; Check that the following is true (same register names on
        ;; both sides of equal sign, as in r8=r8):
        ;; %0=r13, %1=r12, %4=r11

        ;; Save the registers we'll clobber in the movem process
        ;; on the stack.  Don't mention them to gcc, it will only be
        ;; upset.
        subq    11*4,$sp
        movem   $r10,[$sp]

        move.d  $r11,$r0
        move.d  $r11,$r1
        move.d  $r11,$r2
        move.d  $r11,$r3
        move.d  $r11,$r4
        move.d  $r11,$r5
        move.d  $r11,$r6
        move.d  $r11,$r7
        move.d  $r11,$r8
        move.d  $r11,$r9
        move.d  $r11,$r10

        ;; Now we've got this:
        ;; r13 - dst
        ;; r12 - n

        ;; Update n for the first loop
        subq    12*4,$r12
0:
        subq    12*4,$r12
        bge     0b
        movem   $r11,[$r13+]

        addq    12*4,$r12  ;; compensate for last loop underflowing n

        ;; Restore registers from stack
        movem   [$sp+],$r10"

        /* Outputs */ : "=r" (dst), "=r" (n)
        /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
    }
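
    /* Net effect of the movem loop above, sketched in C (illustrative
       only, compiled out): store twelve dwords of the replicated byte
       per turn, leaving dst and n updated for the tail code below. */
#if 0
    while (n >= 12*4)
    {
      int i;
      for (i = 0; i < 12; i++)
        ((long *) dst)[i] = lc;
      dst += 12*4;
      n -= 12*4;
    }
#endif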

    /* Either we start copying directly, using dword copying in a loop,
       or we copy as much as possible with 'movem' and then the last
       block (< 44 bytes) is copied here.  This will work since 'movem'
       will have updated dst and n. */

    while (n >= 16)
    {
      *((long*)dst)++ = lc;
      *((long*)dst)++ = lc;
      *((long*)dst)++ = lc;
      *((long*)dst)++ = lc;
      n -= 16;
    }
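
    /* At most 15 bytes can remain here, so the switch below can finish
       the job with straight-line stores. */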

    /* A switch() is definitely the fastest although it takes a LOT of code.
     * Particularly if you inline code this. */
    switch (n)
    {
      case 0:
        break;
      case 1:
        *(char*)dst = (char) lc;
        break;
      case 2:
        *(short*)dst = (short) lc;
        break;
      case 3:
        *((short*)dst)++ = (short) lc;
        *(char*)dst = (char) lc;
        break;
      case 4:
        *((long*)dst)++ = lc;
        break;
      case 5:
        *((long*)dst)++ = lc;
        *(char*)dst = (char) lc;
        break;
      case 6:
        *((long*)dst)++ = lc;
        *(short*)dst = (short) lc;
        break;
      case 7:
        *((long*)dst)++ = lc;
        *((short*)dst)++ = (short) lc;
        *(char*)dst = (char) lc;
        break;
      case 8:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        break;
      case 9:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *(char*)dst = (char) lc;
        break;
      case 10:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *(short*)dst = (short) lc;
        break;
      case 11:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((short*)dst)++ = (short) lc;
        *(char*)dst = (char) lc;
        break;
      case 12:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        break;
      case 13:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *(char*)dst = (char) lc;
        break;
      case 14:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *(short*)dst = (short) lc;
        break;
      case 15:
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((long*)dst)++ = lc;
        *((short*)dst)++ = (short) lc;
        *(char*)dst = (char) lc;
        break;
    }
  }

  return return_dst; /* destination pointer. */
} /* memset() */
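
/* A minimal usage sketch (illustrative only, compiled out; assumes a
   hosted test build that links this file, rather than the kernel build
   it normally lives in): */
#if 0
#include <assert.h>

int main(void)
{
  char buf[64];
  int i;

  memset(buf, 0xA5, sizeof buf);   /* large block: exercises the movem path */
  for (i = 0; i < (int) sizeof buf; i++)
    assert((unsigned char) buf[i] == 0xA5);

  memset(buf + 1, 0, 3);           /* tiny, unaligned: head fixup + tail */
  assert((unsigned char) buf[0] == 0xA5 && (unsigned char) buf[4] == 0xA5);
  return 0;
}
#endif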