2 * arch/alpha/lib/ev6-stxncpy.S
3 * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
5 * Copy no more than COUNT bytes of the null-terminated string from
8 * This is an internal routine used by strncpy, stpncpy, and strncat.
9 * As such, it uses special linkage conventions to make implementation
10 * of these public functions more efficient.
18 * Furthermore, COUNT may not be zero.
21 * t0 = last word written
22 * t10 = bitmask (with one bit set) indicating the byte position of
23 * the end of the range specified by COUNT
24 * t12 = bitmask (with one bit set) indicating the last byte written
25 * a0 = unaligned address of the last *word* written
26 * a2 = the number of full words left in COUNT
28 * Furthermore, v0, a3-a5, t11, t12, and $at are untouched.
30 * Much of the information about 21264 scheduling/coding comes from:
31 * Compiler Writer's Guide for the Alpha 21264
32 * abbreviated as 'CWG' in other comments here
33 * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
34 * Scheduling notation:
36 * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
37 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
38 * Try not to change the actual algorithm if possible for consistency.
41 #include <alpha/regdef.h>
48 /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
49 doesn't like putting the entry point for a procedure somewhere in the
50 middle of the procedure descriptor. Work around this by putting the
51 aligned copy in its own procedure descriptor */
60 /* On entry to this basic block:
61 t0 == the first destination word for masking back in
62 t1 == the first source word. */
/* NOTE(review): the embedded original line numbers in this excerpt skip
   values, so some instructions/labels (including the label this block
   presumably sits under) are missing from view.  Verify against the
   complete source before modifying. */
64 /* Create the 1st output word and detect 0's in the 1st input word.
   The mskqh/ornot pair forces the source bytes that precede the string
   start to 0xff, so the cmpbge null scan cannot report a false zero in
   the don't-care prefix bytes. */
65 lda t2, -1 # E : build a mask against false zero
66 mskqh t2, a1, t2 # U : detection in the src word (stall)
67 mskqh t1, a1, t3 # U :
68 ornot t1, t2, t2 # E : (stall)
70 mskql t0, a1, t0 # U : assemble the first output word
71 cmpbge zero, t2, t8 # E : bits set iff null found
72 or t0, t3, t0 # E : (stall)
80 /* On entry to this basic block:
81 t0 == a source word not containing a null. */
/* Scheduling notes for the aligned main loop.  NOTE(review): the comment
   opener for the next two lines, and most of the loop body (store,
   pointer bumps, branch), are missing from this excerpt:
85 * separate store quads from load quads
86 * limit of 1 bcond/quad to permit training
*/
96 cmpbge zero, t0, t8 # E :
104 /* Take care of the final (partial) word store. At this point
105 the end-of-count bit is set in t8 iff it applies.
107 On entry to this basic block we have:
108 t0 == the source word containing the null
109 t8 == the cmpbge mask that found it. */
/* NOTE(review): the $a_eos label line itself appears to be missing from
   this excerpt (original line numbers skip here). */
112 negq t8, t12 # E : find low bit set
113 and t8, t12, t12 # E : (stall)
/* t12 now isolates the lowest set bit of t8, i.e. the terminating byte. */
114 /* For the sake of the cache, don't read a destination word
115 if we're not going to need it. */
/* If the terminator is in the last byte of the word (bit 0x80 of the
   cmpbge mask), the whole word is written and no read-modify-write of
   the destination is required. */
116 and t12, 0x80, t6 # E : (stall)
117 bne t6, 1f # U : (stall)
119 /* We're doing a partial word store and so need to combine
120 our source and original destination words. */
121 ldq_u t1, 0(a0) # L :
122 subq t12, 1, t6 # E :
123 or t12, t6, t8 # E : (stall)
/* t8 = mask of byte positions up to and including the terminator. */
124 zapnot t0, t8, t0 # U : clear src bytes > null (stall)
126 zap t1, t8, t1 # .. e1 : clear dst bytes <= null
127 or t0, t1, t0 # e1 : (stall)
131 1: stq_u t0, 0(a0) # L :
132 ret (t9) # L0 : Latency=3
136 /* Add the end-of-count bit to the eos detection bitmask. */
/* NOTE(review): the label for this fragment and the instruction that
   actually ors the end-of-count bit (t10) into t8 are missing from this
   excerpt — only the comment and the branch to $a_eos survive. */
139 br $a_eos # L0 : Latency=3
152 /* Are source and destination co-aligned? */
/* NOTE(review): the instruction that produces t1 here (presumably
   xor a0, a1, t1 on a missing earlier line, per the co-alignment test
   below) is not visible in this excerpt — confirm against full source. */
154 and a0, 7, t0 # E : find dest misalignment
155 and t1, 7, t1 # E : (stall)
156 addq a2, t0, a2 # E : bias count by dest misalignment (stall)
159 and a2, 7, t2 # E : (stall)
160 srl a2, 3, a2 # U : a2 = loop counter = (count - 1)/8 (stall)
161 addq zero, 1, t10 # E :
163 sll t10, t2, t10 # U : t10 = bitmask of last count byte
164 bne t1, $unaligned # U : low bits of a0^a1 differ => not co-aligned
165 /* We are co-aligned; take care of a partial first word. */
166 ldq_u t1, 0(a1) # L : load first src word
169 beq t0, stxncpy_aligned # U : avoid loading dest word if not needed
170 ldq_u t0, 0(a0) # L :
174 br stxncpy_aligned # .. e1 :
181 /* The source and destination are not co-aligned. Align the destination
182 and cope. We have to be very careful about not reading too much and
not writing too much. */
187 /* We know just enough now to be able to assemble the first
188 full source word. We can still find a zero at the end of it
189 that prevents us from outputting the whole thing.
191 On entry to this basic block:
192 t0 == the first dest word, unmasked
193 t1 == the shifted low bits of the first source word
194 t6 == bytemask that is -1 in dest word bytes */
196 ldq_u t2, 8(a1) # L : Latency=3 load second src word
198 mskql t0, a0, t0 # U : mask trailing garbage in dst
199 extqh t2, a1, t4 # U : (3 cycle stall on t2)
201 or t1, t4, t1 # E : first aligned src word complete (stall)
202 mskqh t1, a0, t1 # U : mask leading garbage in src (stall)
203 or t0, t1, t0 # E : first output word complete (stall)
204 or t0, t6, t6 # E : mask original data for zero test (stall)
206 cmpbge zero, t6, t8 # E :
207 beq a2, $u_eocfin # U : no full words left => end-of-count
211 bne t8, $u_final # U : null found in first word
212 lda t6, -1 # E : mask out the bits we have
213 mskql t6, a1, t6 # U : already seen (stall)
214 stq_u t0, 0(a0) # L : store first output word
/* NOTE(review): pointer-bump instructions between these stores/loads
   appear to be missing from this excerpt (line numbers skip). */
217 cmpbge zero, t2, t8 # E : find nulls in second partial (stall)
221 bne t8, $u_late_head_exit # U :
222 /* Finally, we've got all the stupid leading edge cases taken care
223 of and we can set up to enter the main loop. */
224 extql t2, a1, t1 # U : position hi-bits of lo word
225 ldq_u t2, 8(a1) # L : read next high-order source word
228 cmpbge zero, t2, t8 # E : (stall)
233 bne t8, $u_eos # e1 :
238 /* Unaligned copy main loop. In order to avoid reading too much,
239 the loop is structured to detect zeros in aligned source words.
240 This has, unfortunately, effectively pulled half of a loop
241 iteration out into the head and half into the tail, but it does
242 prevent nastiness from accumulating in the very thing we want
243 to run as fast as possible.
245 On entry to this basic block:
246 t1 == the shifted high-order bits from the previous source word
247 t2 == the unshifted current source word
249 We further know that t2 does not contain a null terminator. */
/* NOTE(review): the $u_loop label itself and the a0/a1 pointer-increment
   and a2 decrement instructions are missing from this excerpt (line
   numbers skip) — the ldq_u at 0(a1) / stq_u at -8(a0) offsets only make
   sense if the pointers were bumped by the missing lines.  Verify
   against the complete source. */
253 extqh t2, a1, t0 # U : extract high bits for current word
255 extql t2, a1, t3 # U : extract low bits for next time
258 or t0, t1, t0 # E : current dst word now complete
259 ldq_u t2, 0(a1) # U : Latency=3 load high word for next time
260 stq_u t0, -8(a0) # U : save the current word (stall)
264 cmpbge zero, t2, t8 # E : test new word for eos (2 cycle stall for data)
265 beq a2, $u_eoc # U : (stall)
268 beq t8, $u_loop # U :
273 /* We've found a zero somewhere in the source word we just read.
274 If it resides in the lower half, we have one (probably partial)
275 word to write out, and if it resides in the upper half, we
276 have one full and one partial word left to write out.
278 On entry to this basic block:
279 t1 == the shifted high-order bits from the previous source word
280 t2 == the unshifted current source word. */
/* NOTE(review): the $u_eos label line is not visible in this excerpt. */
282 extqh t2, a1, t0 # U :
283 or t0, t1, t0 # E : first (partial) source word complete (stall)
284 cmpbge zero, t0, t8 # E : is the null in this first bit? (stall)
285 bne t8, $u_final # U : (stall)
287 stq_u t0, 0(a0) # L : the null was in the high-order bits
/* Fall-through path handles the low half; pointer/count bookkeeping
   lines between the store and the extql below are missing from view. */
293 extql t2, a1, t0 # U :
294 cmpbge zero, t0, t8 # E :
295 or t8, t10, t6 # E : (stall)
296 cmoveq a2, t6, t8 # E : if count exhausted, force end-of-count bit in
298 /* Take care of a final (probably partial) result word.
299 On entry to this basic block:
300 t0 == assembled source word
301 t8 == cmpbge mask that found the null. */
/* NOTE(review): the $u_final label line is not visible in this excerpt.
   This mirrors the aligned-path $a_eos epilogue above: isolate the
   terminator byte, skip the dest read-modify-write when the terminator
   is the last byte (0x80), otherwise merge src and dst. */
303 negq t8, t6 # E : isolate low bit set
304 and t6, t8, t12 # E : (stall)
305 and t12, 0x80, t6 # E : avoid dest word load if we can (stall)
306 bne t6, 1f # U : (stall)
308 ldq_u t1, 0(a0) # L :
309 subq t12, 1, t6 # E :
310 or t6, t12, t8 # E : t8 = mask of bytes up to the terminator (stall)
311 zapnot t0, t8, t0 # U : kill source bytes > null
313 zap t1, t8, t1 # U : kill dest bytes <= null
314 or t0, t1, t0 # E : (stall)
318 1: stq_u t0, 0(a0) # L :
319 ret (t9) # L0 : Latency=3
/* End-of-count on the unaligned path: assemble the last word that the
   count permits, re-scan it for a null, then finish via $u_final. */
321 $u_eoc: # end-of-count
322 extqh t2, a1, t0 # U :
323 or t0, t1, t0 # E : (stall)
324 cmpbge zero, t0, t8 # E : (stall)
/* NOTE(review): lines between the cmpbge and the $u_eocfin label
   (presumably folding the end-of-count bit t10 into t8) are missing
   from this excerpt. */
327 $u_eocfin: # end-of-count, final word
329 br $u_final # L0 : Latency=3
333 /* Unaligned copy entry point. */
/* NOTE(review): the $unaligned label line and several interleaved
   instructions are missing from this excerpt (embedded line numbers
   skip); verify the full sequence before modifying. */
337 ldq_u t1, 0(a1) # L : load first source word
338 and a0, 7, t4 # E : find dest misalignment
339 and a1, 7, t5 # E : find src misalignment
340 /* Conditionally load the first destination word and a bytemask
341 with 0xff indicating that the destination byte is sacrosanct. */
346 ldq_u t0, 0(a0) # L :
349 mskql t6, a0, t6 # U :
/* NOTE(review): the instruction that seeds t6 before this mskql is not
   visible here — presumably built from -1 on a missing line. */
354 subq a1, t4, a1 # E : sub dest misalignment from src addr
356 /* If source misalignment is larger than dest misalignment, we need
357 extra startup checks to avoid SEGV. */
359 cmplt t4, t5, t12 # E :
360 extql t1, a1, t1 # U : shift src into place
361 lda t2, -1 # E : for creating masks later
362 beq t12, $u_head # U : src align <= dst align: no extra checks (stall)
364 mskqh t2, t5, t2 # U : begin src byte validity mask
365 cmpbge zero, t1, t8 # E : is there a zero?
366 extql t2, a1, t2 # U :
367 or t8, t10, t5 # E : test for end-of-count too
369 cmpbge zero, t2, t3 # E :
370 cmoveq a2, t5, t8 # E : Latency=2, extra map slot
371 nop # E : keep with cmoveq
372 andnot t8, t3, t8 # E : drop "zeros" in bytes we never loaded (stall)
374 beq t8, $u_head # U :
375 /* At this point we've found a zero in the first partial word of
376 the source. We need to isolate the valid source data and mask
377 it into the original destination data. (Incidentally, we know
378 that we'll need at least one byte of that original dest word.) */
379 ldq_u t0, 0(a0) # L :
380 negq t8, t6 # E : build bitmask of bytes <= zero
381 mskqh t1, t4, t1 # U :
383 and t6, t8, t12 # E : t12 = lowest set bit of t8 (the terminator)
384 subq t12, 1, t6 # E : (stall)
385 or t6, t12, t8 # E : t8 = mask up to and incl. terminator (stall)
386 zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall)
388 zapnot t1, t8, t1 # U : to source validity mask
389 andnot t0, t2, t0 # E : zero place for source to reside
390 or t0, t1, t0 # E : and put it there (stall both t0, t1)
391 stq_u t0, 0(a0) # L : (stall)
393 ret (t9) # L0 : Latency=3