x86: some lock annotations for user copy paths, v2
arch/x86/lib/usercopy_32.c
/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
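
/*
 * Illustrative note (explanatory addition, not in the original source):
 * with movsl_mask.mask == 7, a copy of 64 bytes or more between
 * a1 == 0x1004 and a2 == 0x1001 has ((a1 ^ a2) & 7) == 5, so
 * movsl_is_ok() returns 0 and the caller falls back to the unrolled
 * __copy_user_intel() path, since rep;movsl is slow when source and
 * destination are mutually misaligned on such CPUs.
 */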

/*
 * Copy a null terminated string from userspace.
 */
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
	int __d0, __d1, __d2; \
	might_fault(); \
	__asm__ __volatile__( \
		"	testl %1,%1\n" \
		"	jz 2f\n" \
		"0:	lodsb\n" \
		"	stosb\n" \
		"	testb %%al,%%al\n" \
		"	jz 1f\n" \
		"	decl %1\n" \
		"	jnz 0b\n" \
		"1:	subl %1,%0\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movl %5,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
		  "=&D" (__d2) \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory"); \
} while (0)
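
/*
 * Note on the pattern above (explanatory addition): the ".fixup" section
 * holds out-of-line recovery code, and each _ASM_EXTABLE(from, to) entry
 * tells the page fault handler "if the instruction at label 'from'
 * faults, resume at label 'to'".  Here a fault in the lodsb/stosb loop
 * at 0: lands at 3:, which stores -EFAULT in the result register and
 * rejoins the normal exit path at 2:.
 */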

/**
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(strncpy_from_user);
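
/*
 * Usage sketch (illustrative addition; "demo_fetch_name" and its buffer
 * size are hypothetical, only strncpy_from_user() itself comes from this
 * file): fetch a short user-supplied name into a fixed kernel buffer and
 * distinguish truncation from a fault.  The __strncpy_from_user() variant
 * behaves the same but requires a prior access_ok() check by the caller.
 */
#if 0	/* example only, not built */
static long demo_fetch_name(char *kbuf, const char __user *uname)
{
	long len = strncpy_from_user(kbuf, uname, 32); /* kbuf >= 32 bytes */

	if (len < 0)		/* -EFAULT: bad user pointer */
		return len;
	if (len == 32)		/* no NUL within 32 bytes: truncated */
		return -ENAMETOOLONG;
	return len;		/* string length, excluding the NUL */
}
#endif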

/*
 * Zero Userspace
 */

#define __do_clear_user(addr, size) \
do { \
	int __d0; \
	might_fault(); \
	__asm__ __volatile__( \
		"0:	rep; stosl\n" \
		"	movl %2,%0\n" \
		"1:	rep; stosb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	lea 0(%2,%0,4),%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
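
/*
 * Usage sketch (illustrative addition; "demo_zero_tail" and its
 * parameters are hypothetical): zero the unwritten tail of a user
 * buffer, e.g. after a short read, reporting -EFAULT if any byte
 * could not be cleared.
 */
#if 0	/* example only, not built */
static int demo_zero_tail(void __user *buf, unsigned long copied,
			  unsigned long total)
{
	if (clear_user(buf + copied, total - copied))
		return -EFAULT;	/* some bytes were not cleared */
	return 0;
}
#endif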

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	__asm__ __volatile__(
		"	testl %0, %0\n"
		"	jz 3f\n"
		"	andl %0,%%ecx\n"
		"0:	repne; scasb\n"
		"	setne %%al\n"
		"	subl %%ecx,%0\n"
		"	addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2:	xorl %%eax,%%eax\n"
		"	jmp 1b\n"
		"3:	movb $1,%%al\n"
		"	jmp 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,2b\n"
		".previous"
		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
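
/*
 * Usage sketch (illustrative addition; "demo_user_strlen" is
 * hypothetical): size a user string before allocating a kernel copy.
 * Note the three documented outcomes: the result counts the terminating
 * NUL, 0 means a fault, and a value greater than the limit means the
 * string did not terminate within it.
 */
#if 0	/* example only, not built */
static long demo_user_strlen(const char __user *ustr, long limit)
{
	long len = strnlen_user(ustr, limit);

	if (len == 0)		/* faulted while scanning */
		return -EFAULT;
	if (len > limit)	/* no NUL within @limit bytes */
		return -ENAMETOOLONG;
	return len;		/* includes the terminating NUL */
}
#endif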

#ifdef CONFIG_X86_INTEL_USERCOPY
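
/*
 * Note (explanatory addition): each 64-byte iteration of the unrolled
 * loops below first reads 32 and 64 bytes ahead of the current source
 * position (the first two loads of each iteration), so that a fault in
 * the block is taken up front and the loads double as prefetches.
 */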

static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"1:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 3f\n"
		"2:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"3:	movl 0(%4), %%eax\n"
		"4:	movl 4(%4), %%edx\n"
		"5:	movl %%eax, 0(%3)\n"
		"6:	movl %%edx, 4(%3)\n"
		"7:	movl 8(%4), %%eax\n"
		"8:	movl 12(%4),%%edx\n"
		"9:	movl %%eax, 8(%3)\n"
		"10:	movl %%edx, 12(%3)\n"
		"11:	movl 16(%4), %%eax\n"
		"12:	movl 20(%4), %%edx\n"
		"13:	movl %%eax, 16(%3)\n"
		"14:	movl %%edx, 20(%3)\n"
		"15:	movl 24(%4), %%eax\n"
		"16:	movl 28(%4), %%edx\n"
		"17:	movl %%eax, 24(%3)\n"
		"18:	movl %%edx, 28(%3)\n"
		"19:	movl 32(%4), %%eax\n"
		"20:	movl 36(%4), %%edx\n"
		"21:	movl %%eax, 32(%3)\n"
		"22:	movl %%edx, 36(%3)\n"
		"23:	movl 40(%4), %%eax\n"
		"24:	movl 44(%4), %%edx\n"
		"25:	movl %%eax, 40(%3)\n"
		"26:	movl %%edx, 44(%3)\n"
		"27:	movl 48(%4), %%eax\n"
		"28:	movl 52(%4), %%edx\n"
		"29:	movl %%eax, 48(%3)\n"
		"30:	movl %%edx, 52(%3)\n"
		"31:	movl 56(%4), %%eax\n"
		"32:	movl 60(%4), %%edx\n"
		"33:	movl %%eax, 56(%3)\n"
		"34:	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 1b\n"
		"35:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"99:	rep; movsl\n"
		"36:	movl %%eax, %0\n"
		"37:	rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101:	lea 0(%%eax,%0,4),%0\n"
		"	jmp 100b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,100b\n"
		"	.long 2b,100b\n"
		"	.long 3b,100b\n"
		"	.long 4b,100b\n"
		"	.long 5b,100b\n"
		"	.long 6b,100b\n"
		"	.long 7b,100b\n"
		"	.long 8b,100b\n"
		"	.long 9b,100b\n"
		"	.long 10b,100b\n"
		"	.long 11b,100b\n"
		"	.long 12b,100b\n"
		"	.long 13b,100b\n"
		"	.long 14b,100b\n"
		"	.long 15b,100b\n"
		"	.long 16b,100b\n"
		"	.long 17b,100b\n"
		"	.long 18b,100b\n"
		"	.long 19b,100b\n"
		"	.long 20b,100b\n"
		"	.long 21b,100b\n"
		"	.long 22b,100b\n"
		"	.long 23b,100b\n"
		"	.long 24b,100b\n"
		"	.long 25b,100b\n"
		"	.long 26b,100b\n"
		"	.long 27b,100b\n"
		"	.long 28b,100b\n"
		"	.long 29b,100b\n"
		"	.long 30b,100b\n"
		"	.long 31b,100b\n"
		"	.long 32b,100b\n"
		"	.long 33b,100b\n"
		"	.long 34b,100b\n"
		"	.long 35b,100b\n"
		"	.long 36b,100b\n"
		"	.long 37b,100b\n"
		"	.long 99b,101b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movl %%eax, 0(%3)\n"
		"	movl %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movl %%eax, 8(%3)\n"
		"	movl %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movl %%eax, 16(%3)\n"
		"	movl %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movl %%eax, 24(%3)\n"
		"	movl %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movl %%eax, 32(%3)\n"
		"	movl %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movl %%eax, 40(%3)\n"
		"	movl %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movl %%eax, 48(%3)\n"
		"	movl %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movl %%eax, 56(%3)\n"
		"	movl %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel.  It is cache
 * aware: the destination is written with movnti stores, which bypass the
 * cache, and an sfence after the unrolled loop orders the non-temporal
 * stores.
 * hyoshiok@miraclelinux.com
 */
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	pushl %0\n"
		"	pushl %%eax\n"
		"	xorl %%eax,%%eax\n"
		"	rep; stosb\n"
		"	popl %%eax\n"
		"	popl %0\n"
		"	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		"	.align 2,0x90\n"
		"0:	movl 32(%4), %%eax\n"
		"	cmpl $67, %0\n"
		"	jbe 2f\n"
		"1:	movl 64(%4), %%eax\n"
		"	.align 2,0x90\n"
		"2:	movl 0(%4), %%eax\n"
		"21:	movl 4(%4), %%edx\n"
		"	movnti %%eax, 0(%3)\n"
		"	movnti %%edx, 4(%3)\n"
		"3:	movl 8(%4), %%eax\n"
		"31:	movl 12(%4),%%edx\n"
		"	movnti %%eax, 8(%3)\n"
		"	movnti %%edx, 12(%3)\n"
		"4:	movl 16(%4), %%eax\n"
		"41:	movl 20(%4), %%edx\n"
		"	movnti %%eax, 16(%3)\n"
		"	movnti %%edx, 20(%3)\n"
		"10:	movl 24(%4), %%eax\n"
		"51:	movl 28(%4), %%edx\n"
		"	movnti %%eax, 24(%3)\n"
		"	movnti %%edx, 28(%3)\n"
		"11:	movl 32(%4), %%eax\n"
		"61:	movl 36(%4), %%edx\n"
		"	movnti %%eax, 32(%3)\n"
		"	movnti %%edx, 36(%3)\n"
		"12:	movl 40(%4), %%eax\n"
		"71:	movl 44(%4), %%edx\n"
		"	movnti %%eax, 40(%3)\n"
		"	movnti %%edx, 44(%3)\n"
		"13:	movl 48(%4), %%eax\n"
		"81:	movl 52(%4), %%edx\n"
		"	movnti %%eax, 48(%3)\n"
		"	movnti %%edx, 52(%3)\n"
		"14:	movl 56(%4), %%eax\n"
		"91:	movl 60(%4), %%edx\n"
		"	movnti %%eax, 56(%3)\n"
		"	movnti %%edx, 60(%3)\n"
		"	addl $-64, %0\n"
		"	addl $64, %4\n"
		"	addl $64, %3\n"
		"	cmpl $63, %0\n"
		"	ja 0b\n"
		"	sfence \n"
		"5:	movl %0, %%eax\n"
		"	shrl $2, %0\n"
		"	andl $3, %%eax\n"
		"	cld\n"
		"6:	rep; movsl\n"
		"	movl %%eax,%0\n"
		"7:	rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9:	lea 0(%%eax,%0,4),%0\n"
		"16:	jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,16b\n"
		"	.long 1b,16b\n"
		"	.long 2b,16b\n"
		"	.long 21b,16b\n"
		"	.long 3b,16b\n"
		"	.long 31b,16b\n"
		"	.long 4b,16b\n"
		"	.long 41b,16b\n"
		"	.long 10b,16b\n"
		"	.long 51b,16b\n"
		"	.long 11b,16b\n"
		"	.long 61b,16b\n"
		"	.long 12b,16b\n"
		"	.long 71b,16b\n"
		"	.long 13b,16b\n"
		"	.long 81b,16b\n"
		"	.long 14b,16b\n"
		"	.long 91b,16b\n"
		"	.long 6b,9b\n"
		"	.long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

#else

/*
 * Leave these declared but undefined.  There should be no
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		"	cmp  $7,%0\n" \
		"	jbe  1f\n" \
		"	movl %1,%0\n" \
		"	negl %0\n" \
		"	andl $7,%0\n" \
		"	subl %0,%3\n" \
		"4:	rep; movsb\n" \
		"	movl %3,%0\n" \
		"	shrl $2,%0\n" \
		"	andl $3,%3\n" \
		"	.align 2,0x90\n" \
		"0:	rep; movsl\n" \
		"	movl %3,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5:	addl %3,%0\n" \
		"	jmp 2b\n" \
		"3:	lea 0(%3,%0,4),%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 4b,5b\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

#define __copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		"	cmp  $7,%0\n" \
		"	jbe  1f\n" \
		"	movl %1,%0\n" \
		"	negl %0\n" \
		"	andl $7,%0\n" \
		"	subl %0,%3\n" \
		"4:	rep; movsb\n" \
		"	movl %3,%0\n" \
		"	shrl $2,%0\n" \
		"	andl $3,%3\n" \
		"	.align 2,0x90\n" \
		"0:	rep; movsl\n" \
		"	movl %3,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5:	addl %3,%0\n" \
		"	jmp 6f\n" \
		"3:	lea 0(%3,%0,4),%0\n" \
		"6:	pushl %0\n" \
		"	pushl %%eax\n" \
		"	xorl %%eax,%%eax\n" \
		"	rep; stosb\n" \
		"	popl %%eax\n" \
		"	popl %0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 4b,5b\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,6b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
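
/*
 * Note (explanatory addition): __copy_user_zeroing() differs from
 * __copy_user() only in its fixup path, which rep;stosb-fills the rest
 * of the destination with zeroes after a fault.  This is what gives
 * copy_from_user() its documented zero-padding behaviour.
 */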

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.  Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
					unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
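
/*
 * Design note (explanatory addition): the _nocache variants are used
 * only for copies larger than 64 bytes on SSE2-capable CPUs
 * (n > 64 && cpu_has_xmm2).  For such copies the non-temporal stores
 * avoid evicting useful cache lines with destination data that is
 * unlikely to be read again soon, e.g. bulk writes into the page
 * cache; smaller copies take the ordinary cached path.
 */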

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
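
/*
 * Usage sketch (illustrative addition; "demo_read" and its arguments
 * are hypothetical): a read()-style handler returning data to user
 * space, converting a partial copy into -EFAULT.
 */
#if 0	/* example only, not built */
static ssize_t demo_read(char __user *ubuf, const void *kdata, size_t len)
{
	if (copy_to_user(ubuf, kdata, len))
		return -EFAULT;	/* some bytes were not copied */
	return len;
}
#endif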

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(copy_from_user);
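
/*
 * Usage sketch (illustrative addition; "demo_req" and "demo_write" are
 * hypothetical): a write()-style handler pulling a fixed-size structure
 * in from user space.  Because copy_from_user() zero-pads the
 * destination on failure, the kernel buffer never holds stale data
 * even when -EFAULT is returned.
 */
#if 0	/* example only, not built */
struct demo_req {
	u32 flags;
	u32 length;
};

static int demo_write(const void __user *ubuf)
{
	struct demo_req req;

	if (copy_from_user(&req, ubuf, sizeof(req)))
		return -EFAULT;	/* some bytes were not copied */
	/* ... validate req.flags / req.length before use ... */
	return 0;
}
#endif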