/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	g7
#else
#define GLOBAL_SPARE	g5
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
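
/* VISEntry/VISExit bracket all use of the VIS FPU registers: %fprs is
 * saved in %o5 and FPRS_FEF (the FPU-enable bit) is set on entry, then
 * the saved value is written back on exit.  The MEMCPY_DEBUG variant
 * also clears the scratch globals so stale values are caught early.
 */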
#ifndef EX_LD
#define EX_LD(x)	x
#endif

#ifndef EX_ST
#define EX_ST(x)	x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif
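
/* FREG_FROB shifts a 72-byte window of source data (nine 8-byte FP
 * registers) into the 64-byte output block %f48-%f62, one faligndata
 * per output doubleword, using the byte offset set up by alignaddr.
 */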
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
	faligndata		%f1, %f2, %f48;		\
	faligndata		%f2, %f3, %f50;		\
	faligndata		%f3, %f4, %f52;		\
	faligndata		%f4, %f5, %f54;		\
	faligndata		%f5, %f6, %f56;		\
	faligndata		%f6, %f7, %f58;		\
	faligndata		%f7, %f8, %f60;		\
	faligndata		%f8, %f9, %f62;
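
/* One main-loop step: block-load the next 64 source bytes into %fdest
 * while block-storing the previous, already-aligned 64 bytes from
 * %fsrc, advance both pointers, and branch to jmptgt once len is
 * exhausted.
 */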
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
	EX_LD(LOAD_BLK(%src, %fdest));				\
	EX_ST(STORE_BLK(%fsrc, %dest));				\
	add			%src, 0x40, %src;		\
	subcc			%len, 0x40, %len;		\
	be,pn			%xcc, jmptgt;			\
	 add			%dest, 0x40, %dest;
#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
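
/* Loop exits: STORE_SYNC flushes the pending block store and orders it
 * with a membar #Sync; STORE_JUMP stores the last full block and
 * branches to the per-alignment finish-up code (40f-63f below).
 */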
#define DO_SYNC			membar	#Sync;
#define STORE_SYNC(dest, fsrc)				\
	EX_ST(STORE_BLK(%fsrc, %dest));			\
	add			%dest, 0x40, %dest;	\
	DO_SYNC

#define STORE_JUMP(dest, fsrc, target)			\
	EX_ST(STORE_BLK(%fsrc, %dest));			\
	add			%dest, 0x40, %dest;	\
	ba,pt			%xcc, target;		\
	 nop;
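
/* Tail handling for the unrolled loop: FINISH_VISCHUNK emits one more
 * aligned 8-byte doubleword while at least 8 bytes remain (branching
 * to 95f otherwise); UNEVEN_VISCHUNK parks the straggling data in %f0
 * and continues the drain at 93f.
 */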
#define FINISH_VISCHUNK(dest, f0, f1, left)	\
	subcc			%left, 8, %left;\
	bl,pn			%xcc, 95f;	\
	 faligndata		%f0, %f1, %f48;	\
	EX_ST(STORE(std, %f48, %dest));		\
	add			%dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	subcc			%left, 8, %left;	\
	bl,pn			%xcc, 95f;		\
	 fsrc1			%f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	ba,a,pt			%xcc, 93f;
	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
	.align		64

	.globl		FUNC_NAME
	.type		FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	srlx		%o2, 31, %g2
	cmp		%g2, 0
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o4
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, %o3
	cmp		%o2, 16
	blu,a,pn	%XCC, 80f
	 or		%o3, %o2, %o3

	cmp		%o2, (5 * 64)
	blu,pt		%XCC, 70f
	 andcc		%o3, 0x7, %g0
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
	VISEntry

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
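	/* A rough C sketch of the head-alignment bookkeeping below
	 * (illustrative only, not part of the original source):
	 *
	 *	g2 = 0x40 - (dst & 0x3f);  // bytes until dst is 64-byte aligned
	 *	len -= g2;
	 *	g1 = g2 & 7;               // single-byte copies first
	 *	g2 &= 0x38;                // then whole 8-byte doublewords
	 */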
	 sub		%o0, %o1, %GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2
1:	subcc		%g1, 0x1, %g1
	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
	EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, %GLOBAL_SPARE, %o0
2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD(LOAD(ldd, %o1, %f4))
1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST(STORE(std, %f0, %o0))
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f0
	EX_ST(STORE(std, %f0, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0
	/* Destination is 64-byte aligned.  */
3:
	membar		#LoadStore | #StoreStore | #StoreLoad

	subcc		%o2, 0x40, %GLOBAL_SPARE
	add		%o1, %g1, %g1
	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
	srl		%g1, 3, %g2
	sub		%o2, %GLOBAL_SPARE, %g3
	andn		%o1, (0x40 - 1), %o1
	and		%g2, 7, %g2
	andncc		%g3, 0x7, %g3
	fmovd		%f0, %f2
	sub		%g3, 0x8, %g3
	sub		%o2, %GLOBAL_SPARE, %o2

	add		%g1, %GLOBAL_SPARE, %g1
	subcc		%o2, %g3, %o2

	EX_LD(LOAD_BLK(%o1, %f0))
	add		%o1, 0x40, %o1
	add		%g1, %g3, %g1
	EX_LD(LOAD_BLK(%o1, %f16))
	add		%o1, 0x40, %o1
	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
	EX_LD(LOAD_BLK(%o1, %f32))
	add		%o1, 0x40, %o1
	/* There are 8 instances of the unrolled loop,
	 * one for each possible alignment of the
	 * source buffer.  Each loop instance is 452
	 * bytes.
	 */
	sll		%g2, 3, %o3
	sub		%o3, %g2, %o3
	sllx		%o3, 4, %o3
	add		%o3, %g2, %o3
	sllx		%o3, 2, %g2
1:	rd		%pc, %o3
	add		%o3, %lo(1f - 1b), %o3
	jmpl		%o3 + %g2, %g0
	 nop

	.align		64
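	/* The shift/add sequence above forms %g2 * 452 without a multiply:
	 * ((%g2 * 8 - %g2) * 16 + %g2) * 4 == %g2 * 113 * 4 == %g2 * 452,
	 * the byte offset of the selected loop instance from '1:' below.
	 */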
1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f0, %f2, %f48
1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_SYNC(o0, f48)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_JUMP(o0, f48, 40f)
2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_SYNC(o0, f48)
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_JUMP(o0, f48, 48f)
3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_SYNC(o0, f48)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_JUMP(o0, f48, 56f)

1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f2, %f4, %f48
1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_SYNC(o0, f48)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_JUMP(o0, f48, 41f)
2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_SYNC(o0, f48)
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_JUMP(o0, f48, 49f)
3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_SYNC(o0, f48)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_JUMP(o0, f48, 57f)

1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f4, %f6, %f48
1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_SYNC(o0, f48)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_JUMP(o0, f48, 42f)
2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_SYNC(o0, f48)
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_JUMP(o0, f48, 50f)
3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_SYNC(o0, f48)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_JUMP(o0, f48, 58f)

1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f6, %f8, %f48
1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_SYNC(o0, f48)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_JUMP(o0, f48, 43f)
2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_SYNC(o0, f48)
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_JUMP(o0, f48, 51f)
3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_SYNC(o0, f48)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_JUMP(o0, f48, 59f)

1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f8, %f10, %f48
1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_SYNC(o0, f48)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_JUMP(o0, f48, 44f)
2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_SYNC(o0, f48)
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_JUMP(o0, f48, 52f)
3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_SYNC(o0, f48)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_JUMP(o0, f48, 60f)

1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f10, %f12, %f48
1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_SYNC(o0, f48)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_JUMP(o0, f48, 45f)
2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_SYNC(o0, f48)
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_JUMP(o0, f48, 53f)
3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_SYNC(o0, f48)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_JUMP(o0, f48, 61f)

1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f12, %f14, %f48
1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_SYNC(o0, f48)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_JUMP(o0, f48, 46f)
2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_SYNC(o0, f48)
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_JUMP(o0, f48, 54f)
3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_SYNC(o0, f48)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_JUMP(o0, f48, 62f)

1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f14, %f16, %f48
1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_SYNC(o0, f48)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_JUMP(o0, f48, 47f)
2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_SYNC(o0, f48)
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_JUMP(o0, f48, 55f)
3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_SYNC(o0, f48)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_JUMP(o0, f48, 63f)

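	/* Per-alignment finish-up entry points.  %g3 is the number of
	 * bytes still buffered in the FP registers; each FINISH_VISCHUNK
	 * below stores one 8-byte doubleword until fewer than 8 remain,
	 * after which the byte tail is handled at 95f.
	 */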
	.align		64
40:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
41:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
42:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
43:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
44:	FINISH_VISCHUNK(o0, f8,  f10, g3)
45:	FINISH_VISCHUNK(o0, f10, f12, g3)
46:	FINISH_VISCHUNK(o0, f12, f14, g3)
47:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
48:	FINISH_VISCHUNK(o0, f16, f18, g3)
49:	FINISH_VISCHUNK(o0, f18, f20, g3)
50:	FINISH_VISCHUNK(o0, f20, f22, g3)
51:	FINISH_VISCHUNK(o0, f22, f24, g3)
52:	FINISH_VISCHUNK(o0, f24, f26, g3)
53:	FINISH_VISCHUNK(o0, f26, f28, g3)
54:	FINISH_VISCHUNK(o0, f28, f30, g3)
55:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
56:	FINISH_VISCHUNK(o0, f32, f34, g3)
57:	FINISH_VISCHUNK(o0, f34, f36, g3)
58:	FINISH_VISCHUNK(o0, f36, f38, g3)
59:	FINISH_VISCHUNK(o0, f38, f40, g3)
60:	FINISH_VISCHUNK(o0, f40, f42, g3)
61:	FINISH_VISCHUNK(o0, f42, f44, g3)
62:	FINISH_VISCHUNK(o0, f44, f46, g3)
63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)
93:	EX_LD(LOAD(ldd, %o1, %f2))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f0, %f2, %f8
	EX_ST(STORE(std, %f8, %o0))
	bl,pn		%xcc, 95f
	 add		%o0, 8, %o0
	EX_LD(LOAD(ldd, %o1, %f0))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f2, %f0, %f8
	EX_ST(STORE(std, %f8, %o0))
	bge,pt		%xcc, 93b
	 add		%o0, 8, %o0
95:	brz,pt		%o2, 2f
	 mov		%g1, %o1

1:	EX_LD(LOAD(ldub, %o1, %o3))
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %o3, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 1, %o0
2:	membar		#StoreLoad | #StoreStore
	VISExit
	retl
	 mov		EX_RETVAL(%o4), %o0
	.align		64
70:	/* 16 < len <= (5 * 64) */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3
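
	/* Medium-size copy.  %o3 holds (dst - src) from here on, so every
	 * store below can address the destination as %o1 + %o3 and only
	 * the source pointer needs advancing.
	 */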
72:	andn		%o2, 0xf, %GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5))
	sub		%o2, 0x8, %o2
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5))
	sub		%o2, 0x4, %o2
	EX_ST(STORE(stw, %o5, %o1 + %o3))
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop
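
	/* 75: dst and src disagree on 8-byte alignment.  Copy single bytes
	 * until dst is 8-byte aligned, then either resume the 16-byte loop
	 * (if src ended up aligned too) or fall into the shift-and-merge
	 * loop at 8f.
	 */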
75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	EX_LD(LOAD(ldub, %o1, %o5))
	subcc		%g1, 1, %g1
	EX_ST(STORE(stb, %o5, %o1 + %o3))
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b
8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2))
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, %GLOBAL_SPARE
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0))
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, 85f
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3
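
	/* The 8f loop above is the classic shift-and-merge for a source
	 * that stays misaligned once the destination is 8-byte aligned.
	 * A rough C sketch (illustrative only; big-endian, with g1 the
	 * source misalignment in bits):
	 *
	 *	prev = *aligned_src++ << g1;
	 *	while (words--) {
	 *		cur    = *aligned_src++;
	 *		*dst++ = prev | (cur >> (64 - g1));
	 *		prev   = cur << g1;
	 *	}
	 */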
	.align		64
80:	/* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:	EX_LD(LOAD(lduw, %o1, %g1))
	subcc		%o2, 4, %o2
	EX_ST(STORE(stw, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1
85:	retl
	 mov		EX_RETVAL(%o4), %o0
	.align		32
90:	EX_LD(LOAD(ldub, %o1, %g1))
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %g1, %o1 + %o3))
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0
	.size		FUNC_NAME, .-FUNC_NAME