/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2006 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
    .section    .icode,"ax",@progbits

    .align      2
    .global     _memmove
    .type       _memmove,@function

/* Moves <length> bytes of data in memory from <source> to <dest>
 * Regions may overlap.
 * This version is optimized for speed, and needs the corresponding memcpy
 * implementation for the forward copy branch.
 *
 * arguments:
 *  r4 - destination address
 *  r5 - source address
 *  r6 - length
 *
 * return value:
 *  r0 - destination address (like ANSI version)
 *
 * register usage:
 *  r0 - data / scratch
 *  r1 - 2nd data / scratch
 *  r2 - scratch
 *  r3 - last long bound / adjusted start address (only if >= 11 bytes)
 *  r4 - current dest address
 *  r5 - source start address
 *  r6 - current source address
 *
 * The instruction order is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine also tries to utilize fast page mode.
 */
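
/* For reference, a minimal C model of the backward copy implemented below
 * (illustrative sketch only, not assembled; the function name is made up).
 * Copying from the end downwards keeps an overlapping region with
 * dest > source intact; the forward case is delegated to memcpy instead.
 *
 *   void *memmove_backward_ref(void *dest, const void *source, size_t length)
 *   {
 *       unsigned char *d = (unsigned char *)dest + length;
 *       const unsigned char *s = (const unsigned char *)source + length;
 *       while (length--)
 *           *--d = *--s;
 *       return dest;
 *   }
 */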
_memmove:
    cmp/hi  r4,r5       /* source > destination */
    bf      .backward   /* no: backward copy */
    mov.l   .memcpy_fwd,r0
    jmp     @r0         /* yes: forward copy is done by memcpy */
    mov     r4,r7       /* store dest for returning (delay slot) */

    .align  2
.memcpy_fwd:
    .long   ___memcpy_fwd_entry
.backward:
    add     r6,r4       /* r4 = destination end */
    mov     #11,r0
    cmp/hs  r0,r6       /* at least 11 bytes to copy? (ensures 2 aligned longs) */
    add     #-8,r5      /* adjust for late decrement (max. 2 longs) */
    add     r5,r6       /* r6 = source end - 8 */
    bf      .start_b2r  /* no: jump directly to byte loop */
    mov     #-4,r3      /* r3 = 0xfffffffc */
    and     r6,r3       /* r3 = last source long bound */
    cmp/hi  r3,r6       /* already aligned? */
    bf      .end_b1r    /* yes: skip leading byte loop */

.loop_b1r:
    mov.b   @(7,r6),r0  /* load byte */
    add     #-1,r6      /* decrement source addr */
    mov.b   r0,@-r4     /* store byte */
    cmp/hi  r3,r6       /* runs r6 down to last long bound */
    bt      .loop_b1r
.end_b1r:
    mov     #3,r1
    and     r4,r1       /* r1 = dest alignment offset */
    mova    .jmptab_r,r0
    mov.b   @(r0,r1),r1 /* select appropriate main loop.. */
    add     r0,r1
    mov     r5,r3       /* copy start address to r3 */
    jmp     @r1         /* ..and jump to it */
    add     #7,r3       /* adjust end addr for main loops doing 2 longs/pass (delay slot) */
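
    /* Example of the dispatch above (illustrative values): if the current
     * destination address has (dest & 3) == 2, r1 picks the third table
     * byte below and the jump lands in .loop_do2r, the word-aligned loop. */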
    /** main loops, copying 2 longs per pass to profit from fast page mode **/

    /* long aligned destination (fastest) */
    .align  2
.loop_do0r:
    mov.l   @r6,r1      /* load first long */
    add     #-8,r6      /* decrement source addr */
    mov.l   @(12,r6),r0 /* load second long */
    cmp/hi  r3,r6       /* runs r6 down to first or second long bound */
    mov.l   r0,@-r4     /* store second long */
    mov.l   r1,@-r4     /* store first long; NOT ALIGNED - no speed loss here! */
    bt      .loop_do0r

    add     #-4,r3      /* readjust end address */
    cmp/hi  r3,r6       /* first long left? */
    bf      .start_b2r  /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0  /* load first long */
    add     #-4,r6      /* decrement source addr */
    bra     .start_b2r  /* jump to trailing byte loop */
    mov.l   r0,@-r4     /* store first long (delay slot) */
    /* word aligned destination (long + 2) */
    .align  2
.loop_do2r:
    mov.l   @r6,r1      /* load first long */
    add     #-8,r6      /* decrement source addr */
    mov.l   @(12,r6),r0 /* load second long */
    cmp/hi  r3,r6       /* runs r6 down to first or second long bound */
    mov.w   r0,@-r4     /* store low word of second long */
    xtrct   r1,r0       /* extract low word of first long & high word of second long */
    mov.l   r0,@-r4     /* and store as long */
    shlr16  r1          /* get high word of first long */
    mov.w   r1,@-r4     /* and store it */
    bt      .loop_do2r

    add     #-4,r3      /* readjust end address */
    cmp/hi  r3,r6       /* first long left? */
    bf      .start_b2r  /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0  /* load first long */
    add     #-4,r6      /* decrement source addr */
    mov.w   r0,@-r4     /* store low word */
    shlr16  r0          /* get high word */
    bra     .start_b2r  /* jump to trailing byte loop */
    mov.w   r0,@-r4     /* and store it (delay slot) */
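
    /* Worked example for the word-aligned loop above (illustrative, big-endian
     * byte order): if the first long holds bytes A B C D and the second long
     * E F G H, the stores emit the word G H, then the long C D E F built by
     * xtrct, then the word A B - so all eight bytes land at the destination
     * in their original order, two longs per pass. */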
    /* jumptable for loop selector */
    .align  2
.jmptab_r:
    .byte   .loop_do0r - .jmptab_r  /* placed in the middle because the SH1 */
    .byte   .loop_do1r - .jmptab_r  /* loads bytes sign-extended. Otherwise */
    .byte   .loop_do2r - .jmptab_r  /* the last loop would be out of reach */
    .byte   .loop_do3r - .jmptab_r  /* of the offset range. */
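
    /* Note: the table entries are signed 8-bit displacements, so every loop
     * entry point must lie within -128..+127 bytes of .jmptab_r; keeping the
     * table between the main loops guarantees that for all four targets. */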
    /* byte aligned destination (long + 1) */
    .align  2
.loop_do1r:
    mov.l   @r6,r1      /* load first long */
    add     #-8,r6      /* decrement source addr */
    mov.l   @(12,r6),r0 /* load second long */
    cmp/hi  r3,r6       /* runs r6 down to first or second long bound */
    mov.b   r0,@-r4     /* store low byte of second long */
    shlr8   r0          /* get upper 3 bytes */
    mov     r1,r2       /* copy first long */
    shll16  r2          /* move low byte of first long all the way up, .. */
    shll8   r2
    or      r2,r0       /* ..combine with the 3 bytes of second long.. */
    mov.l   r0,@-r4     /* ..and store as long */
    shlr8   r1          /* get middle 2 bytes */
    mov.w   r1,@-r4     /* store as word */
    shlr16  r1          /* get upper byte */
    mov.b   r1,@-r4     /* and store */
    bt      .loop_do1r
.last_do13r:
    add     #-4,r3      /* readjust end address */
    cmp/hi  r3,r6       /* first long left? */
    bf      .start_b2r  /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0  /* load first long */
    add     #-4,r6      /* decrement source addr */
    mov.b   r0,@-r4     /* store low byte */
    shlr8   r0          /* get middle 2 bytes */
    mov.w   r0,@-r4     /* store as word */
    shlr16  r0          /* get upper byte */
    bra     .start_b2r  /* jump to trailing byte loop */
    mov.b   r0,@-r4     /* and store (delay slot) */
    /* byte aligned destination (long + 3) */
    .align  2
.loop_do3r:
    mov.l   @r6,r1      /* load first long */
    add     #-8,r6      /* decrement source addr */
    mov.l   @(12,r6),r0 /* load second long */
    mov     r1,r2       /* copy first long */
    mov.b   r0,@-r4     /* store low byte of second long */
    shlr8   r0          /* get middle 2 bytes */
    mov.w   r0,@-r4     /* store as word */
    shlr16  r0          /* get upper byte */
    shll8   r2          /* move lower 3 bytes of first long one up.. */
    or      r2,r0       /* ..combine with the 1 byte of second long.. */
    mov.l   r0,@-r4     /* ..and store as long */
    shlr16  r1          /* get upper byte of first long.. */
    shlr8   r1          /* ..(needs two shifts) */
    cmp/hi  r3,r6       /* runs r6 down to first or second long bound */
    mov.b   r1,@-r4     /* ..and store */
    bt      .loop_do3r

    bra     .last_do13r /* handle first longword: reuse routine for (long + 1) */
    add     #-4,r3      /* readjust end address (delay slot) */
    /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
    .align  2
.loop_b2r:
    mov.b   @(7,r6),r0  /* load byte */
    add     #-1,r6      /* decrement source addr */
    mov.b   r0,@-r4     /* store byte */
.start_b2r:
    cmp/hi  r5,r6       /* runs r6 down to start address */
    bt      .loop_b2r

    rts
    mov     r4,r0       /* return dest start address (delay slot) */

.end:
    .size   _memmove,.end-_memmove