/*
 * User address space access functions.
 * The non-inlined parts of asm-cris/uaccess.h are here.
 *
 * Copyright (C) 2000, 2003 Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 */
#include <asm/uaccess.h>
/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 3.2.1 Axis release R53/1.53-v32".

   Check regularly...

   Note that for CRISv32, the PC saved at a bus-fault is the address
   *at* the faulting instruction, with a special case for instructions
   in delay slots: then it's the address of the branch.  Note also that
   in contrast to v10, a postincrement in the instruction is *not*
   performed at a bus-fault; the register is seen having the original
   value in fault handlers.  */
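/* Concretely, each asm statement below pairs the address of a possibly
   faulting instruction with a fixup label through an __ex_table entry,
   e.g. ".dword 1b,4b": on a fault whose saved PC matches label 1, the
   fault handler resumes execution at label 4.  The delay-slot rule
   above matters for the clearing loop at the end of this file, where
   the faulting movem sits in a branch delay slot.  */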
/* Copy to userspace.  This is based on the memcpy used for
   kernel-to-kernel copying; see "string.c".  */
unsigned long
__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
{
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
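  /* The inline asm below depends on exactly this register allocation
     (r13 = dst, r11 = src, r12 = n, r10 = retn); its leading .ifnc
     directive makes the build fail if gcc did not honor the register
     asm declarations above.  */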
  /* When src is aligned but not dst, this makes a few extra needless
     cycles.  I believe it would take as many to check that the
     re-alignment was unnecessary.  */
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes; so we
	 don't have to check further for overflows.  */
      && n >= 3)
    {
      if ((unsigned long) dst & 1)
	{
	  __asm_copy_to_user_1 (dst, src, retn);
	  n--;
	}

      if ((unsigned long) dst & 2)
	{
	  __asm_copy_to_user_2 (dst, src, retn);
	  n -= 2;
	}
    }
  /* Movem is dirt cheap.  The overhead is low enough to always use the
     minimum possible block size as the threshold.  */
  if (n >= 44)
    {
      /* For large copies we use 'movem'.  */
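      /* The movem below saves and moves registers r0..r10 inclusive:
	 11 registers * 4 bytes = 44 bytes, which is why 44 is both the
	 threshold above and the loop step in the asm.  */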
      /* It is not optimal to tell the compiler about clobbering any
	 registers; that will move the saving/restoring of those
	 registers to the function prologue/epilogue, and make
	 non-movem sizes suboptimal.  */
      __asm__ volatile ("\
	;; Check that the register asm declarations came out right.	\n\
	;; The GCC manual explicitly says TRT will happen.		\n\
	.ifnc %0%1%2%3,$r13$r11$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll use in the movem process		\n\
	;; on the stack.						\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	;; Now we've got this:						\n\
	;; r11 - src							\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop.					\n\
	subq	44,$r12							\n\
0:									\n\
	movem	[$r11+],$r10						\n\
	subq	44,$r12							\n\
1:	bge	0b							\n\
	movem	$r10,[$r13+]						\n\
3:									\n\
	addq	44,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack.				\n\
	movem	[$sp+],$r10						\n\
2:									\n\
	.section .fixup,\"ax\"						\n\
4:									\n\
; When failing on any of the 1..44 bytes in a chunk, we adjust back the	\n\
; source pointer and just drop through to the by-16 and by-4 loops to	\n\
; get the correct number of failing bytes.  This necessarily means a	\n\
; few extra exceptions, but invalid user pointers shouldn't happen in	\n\
; time-critical code anyway.						\n\
	jump	3b							\n\
	subq	44,$r11							\n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	1b,4b							\n\
	.previous"

	/* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
	/* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
    }
  while (n >= 16)
    {
      __asm_copy_to_user_16 (dst, src, retn);
      n -= 16;
    }

  while (n >= 4)
    {
      __asm_copy_to_user_4 (dst, src, retn);
      n -= 4;
    }
  switch (n)
    {
    case 0:
      break;
    case 1:
      __asm_copy_to_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_to_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_to_user_3 (dst, src, retn);
      break;
    }

  return retn;
}
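/* As with copy_to_user() in general, a nonzero return from __copy_user
   means that the trailing retn bytes could not be written to
   userspace.  */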
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return-value is the number of bytes that were
   inaccessible.  */

unsigned long
__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
{
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
  /* The best reason to align src is that we then know that a read-fault
     was for aligned bytes; there's no 1..3 remaining good bytes to
     salvage.  */
  if (((unsigned long) src & 3) != 0)
    {
      if (((unsigned long) src & 1) && n != 0)
	{
	  __asm_copy_from_user_1 (dst, src, retn);
	  n--;
	}

      if (((unsigned long) src & 2) && n >= 2)
	{
	  __asm_copy_from_user_2 (dst, src, retn);
	  n -= 2;
	}

      /* We only need one check after the unalignment-adjustments,
	 because if both adjustments were done, either both or neither
	 reference had an exception.  */
      if (retn != 0)
	goto copy_exception_bytes;
    }
  /* Movem is dirt cheap.  The overhead is low enough to always use the
     minimum possible block size as the threshold.  */
  if (n >= 44)
    {
      /* It is not optimal to tell the compiler about clobbering any
	 registers; that will move the saving/restoring of those
	 registers to the function prologue/epilogue, and make
	 non-movem sizes suboptimal.  */
      __asm__ volatile ("\
	.ifnc %0%1%2%3,$r13$r11$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll use in the movem process		\n\
	;; on the stack.						\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	;; Now we've got this:						\n\
	;; r11 - src							\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop.					\n\
	subq	44,$r12							\n\
0:									\n\
	movem	[$r11+],$r10						\n\
									\n\
	subq	44,$r12							\n\
	bge	0b							\n\
	movem	$r10,[$r13+]						\n\
									\n\
4:									\n\
	addq	44,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack.				\n\
	movem	[$sp+],$r10						\n\
	.section .fixup,\"ax\"						\n\
									\n\
;; Do not jump back into the loop if we fail.  For some uses, we get a	\n\
;; page fault somewhere on the line.  Without checking for page limits,\n\
;; we don't know where, but we need to copy accurately and keep an	\n\
;; accurate count; not just clear the whole line.  To do that, we fall	\n\
;; down in the code below, proceeding with smaller amounts.  It should	\n\
;; be kept in mind that we have to cater to code like what at one time	\n\
;; was in fs/super.c:							\n\
;;  i = size - copy_from_user((void *)page, data, size);		\n\
;; which would cause repeated faults while clearing the remainder of	\n\
;; the SIZE bytes at PAGE after the first fault.			\n\
;; A caveat here is that we must not fall through from a failing page	\n\
;; to a valid page.							\n\
									\n\
3:									\n\
	jump	4b ;; Fall through, pretending the fault didn't happen.\n\
	nop								\n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	0b,3b							\n\
	.previous"

	/* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
	/* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
    }
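  /* A faulting movem chunk resumes at 4: above as if it had succeeded,
     but with n compensated and src/dst still pointing at the failing
     chunk (CRISv32 does not perform the postincrement on a fault).
     The word loop below then re-takes the fault at a finer
     granularity, so retn and the zero-fill stay exact.  */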
  /* Either we directly start copying here, using dword copying in a
     loop, or we copy as much as possible with 'movem' and then the last
     block (<44 bytes) is copied here.  This will work since 'movem'
     will have updated src, dst and n.  (Except with failing src.)

     Since we want to keep src accurate, we can't use
     __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and
     retn, but not src (by design; its value is ignored elsewhere).  */
  while (n >= 4)
    {
      __asm_copy_from_user_4 (dst, src, retn);
      n -= 4;

      if (retn)
	goto copy_exception_bytes;
    }
  /* If we get here, there were no memory read faults.  */
  switch (n)
    {
      /* These copies are at least "naturally aligned" (so we don't have
	 to check each byte), due to the src alignment code before the
	 movem loop.  The *_3 case *will* get the correct count for
	 retn.  */
    case 0:
      /* This case is deliberately left in (if you have doubts, check
	 the generated assembly code).  */
      break;
    case 1:
      __asm_copy_from_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_from_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_from_user_3 (dst, src, retn);
      break;
    }

  /* If we get here, retn correctly reflects the number of failing
     bytes.  */
  return retn;
copy_exception_bytes:
  /* We already have "retn" bytes cleared, and need to clear the
     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
     memset is preferred here, since this isn't speed-critical code and
     we'd rather have this a leaf function than calling memset.  */
  {
    char *endp;
    for (endp = dst + n; dst < endp; dst++)
      *dst = 0;
  }

  return retn + n;
}
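/* Illustrative (hypothetical names) use of the contract above: a caller
   can recover the number of bytes actually copied as

	copied = size - __copy_user_zeroing(kbuf, ubuf, size);

   since the return value counts only the inaccessible tail, which has
   also been zero-filled in kbuf.  */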
/* Zero userspace.  */

unsigned long
__do_clear_user (void __user *pto, unsigned long pn)
{
  register char *dst __asm__ ("r13") = pto;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't clear more than a few bytes.  */
      && n >= 3)
    {
      if ((unsigned long) dst & 1)
	{
	  __asm_clear_1 (dst, retn);
	  n--;
	}

      if ((unsigned long) dst & 2)
	{
	  __asm_clear_2 (dst, retn);
	  n -= 2;
	}
    }
  if (n >= 48)
    {
      /* For large clears we use 'movem'.  */
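      /* Here movem stores registers r0..r11 inclusive: 12 registers *
	 4 bytes = 48 bytes per iteration, hence the 48-byte threshold
	 above and the 12*4 steps in the asm below.  */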
      /* It is not optimal to tell the compiler about clobbering any
	 call-saved registers; that will move the saving/restoring of
	 those registers to the function prologue/epilogue, and make
	 non-movem sizes suboptimal.

	 This method is not foolproof; it assumes that the "asm reg"
	 declarations at the beginning of the function really are used
	 here (beware: they may be moved to temporary registers).
	 This way, we do not have to save/move the registers around into
	 temporaries; we can safely use them straight away.

	 If you want to check that the allocation was right, see the
	 .ifnc directive below; it verifies that the operands really
	 did end up in $r13, $r12 and $r10.  */
      __asm__ volatile ("\
	.ifnc %0%1%2,$r13$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll clobber in the movem process	\n\
	;; on the stack.  Don't mention them to gcc, it will only be	\n\
	;; upset.							\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	clear.d	$r0							\n\
	clear.d	$r1							\n\
	clear.d	$r2							\n\
	clear.d	$r3							\n\
	clear.d	$r4							\n\
	clear.d	$r5							\n\
	clear.d	$r6							\n\
	clear.d	$r7							\n\
	clear.d	$r8							\n\
	clear.d	$r9							\n\
	clear.d	$r10							\n\
	clear.d	$r11							\n\
									\n\
	;; Now we've got this:						\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop.					\n\
	subq	12*4,$r12						\n\
0:									\n\
	subq	12*4,$r12						\n\
1:									\n\
	bge	0b							\n\
	movem	$r11,[$r13+]						\n\
									\n\
	addq	12*4,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack.				\n\
	movem	[$sp+],$r10						\n\
2:									\n\
	.section .fixup,\"ax\"						\n\
3:									\n\
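;; A write fault in the movem at 1b lands here: the movem sits in the	\n\
;; bge delay slot, so the saved PC is the address of the bge at 1:,	\n\
;; which is what the __ex_table entry below matches.  Add one chunk	\n\
;; (12*4 bytes) to the saved retn accumulator and advance dst past	\n\
;; the failing chunk, then re-enter the loop, re-clearing $r10 in	\n\
;; the jump delay slot; retn thus accumulates the bytes left		\n\
;; uncleared.								\n\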
	movem	[$sp],$r10						\n\
	addq	12*4,$r10						\n\
	addq	12*4,$r13						\n\
	movem	$r10,[$sp]						\n\
	jump	0b							\n\
	clear.d	$r10							\n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	1b,3b							\n\
	.previous"
	/* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
	/* Inputs */ : "0" (dst), "1" (n), "2" (retn)
	/* Clobber */ : "r11");
    }
  while (n >= 16)
    {
      __asm_clear_16 (dst, retn);
      n -= 16;
    }

  while (n >= 4)
    {
      __asm_clear_4 (dst, retn);
      n -= 4;
    }
  switch (n)
    {
    case 0:
      break;
    case 1:
      __asm_clear_1 (dst, retn);
      break;
    case 2:
      __asm_clear_2 (dst, retn);
      break;
    case 3:
      __asm_clear_3 (dst, retn);
      break;
    }

  return retn;
}