2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
31 * $DragonFly: src/sys/platform/pc64/amd64/support.s,v 1.1 2007/09/23 04:29:31 yanyh Exp $
36 #include <machine/asmacros.h>
37 #include <machine/intr_machdep.h>
38 #include <machine/pmap.h>
/*
 * Per-interrupt counter and name tables used by interrupt accounting.
 * NOTE(review): the label lines (intrcnt:, eintrcnt:, intrnames:,
 * eintrnames:) exported by the .globl directives were elided by
 * extraction; only the .globl/.space lines survive.
 */
43 .globl intrcnt, eintrcnt
/* one 64-bit counter per interrupt source */
45 .space INTRCNT_COUNT * 8
/* one fixed-width (MAXCOMLEN + 1 byte) name string per counter */
48 .globl intrnames, eintrnames
50 .space INTRCNT_COUNT * (MAXCOMLEN + 1)
57 * void bzero(void *buf, u_int len)
/*
 * Interior of bzero's unrolled store loop.  Each five-line group below
 * is ONE instruction split by extraction; reassembled:
 *     movnti  %rax,(%rdi,%rdx)
 *     movnti  %rax,8(%rdi,%rdx)
 *     movnti  %rax,16(%rdi,%rdx)
 *     movnti  %rax,24(%rdi,%rdx)
 * i.e. 32 bytes of %rax (presumably zeroed on an elided line -- TODO
 * confirm) are written per iteration with non-temporal stores, so a
 * large bzero does not evict useful cache lines.  The loop control and
 * the sfence that must order movnti stores are on elided lines.
 */
83 movnti
%rax
,(%rdi
,%rdx
)
84 movnti
%rax
,8(%rdi
,%rdx
)
85 movnti
%rax
,16(%rdi
,%rdx
)
86 movnti
%rax
,24(%rdi
,%rdx
)
/* bcmp interior: clear the direction flag so the (elided) string
 * compare runs forwards. */
95 cld
/* compare forwards */
110 * bcopy(src, dst, cnt)
112 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * Overlap test (reassembled: cmpq %rcx,%rax): decides whether the
 * buffers overlap with src below dst, in which case the copy must run
 * backwards so unread source bytes are not clobbered.
 */
120 cmpq
%rcx
,%rax
/* overlapping && src < dst? */
/* Forward path: copy 8 bytes at a time (shrq $3,%rcx), then the
 * remaining 0-7 bytes (andq $7,%rcx); the rep movs lines are elided. */
123 shrq $
3,%rcx
/* copy by 64-bit words */
124 cld
/* nope, copy forwards */
128 andq $
7,%rcx
/* any bytes left? */
/* Backward path: advance %rdi toward the buffer end (addq %rcx,%rdi)
 * and copy with the direction flag set; the std/rep movs lines are
 * elided -- TODO confirm against upstream. */
135 addq
%rcx
,%rdi
/* copy backwards */
139 andq $
7,%rcx
/* any fractional bytes? */
143 movq
%rdx
,%rcx
/* copy remainder by 32-bit words */
157 * Note: memcpy does not support overlapping copies
/*
 * memcpy interior: forward-only copy.  Reassembled: shrq $3,%rcx to
 * copy 64-bit words first, then andq $7,%rcx for the remaining 0-7
 * bytes.  The rep movsq / rep movsb lines themselves are elided.
 */
161 shrq $
3,%rcx
/* copy by 64-bit words */
162 cld
/* copy forwards */
166 andq $
7,%rcx
/* any bytes left? */
172 * pagecopy(%rdi=from, %rsi=to)
/*
 * Copy one page with non-temporal stores.  %rax is initialized to
 * -PAGE_SIZE; presumably the (elided) setup points %rdi/%rsi past the
 * end of each page so the negative index walks forward -- TODO confirm.
 */
175 movq $
-PAGE_SIZE
,%rax
/* prefetch a source line well ahead of the copy loop, without
 * polluting the cache (prefetchnta) */
180 prefetchnta
(%rdi
,%rax
)
/*
 * Loop interior, 32 bytes per iteration.  Reassembled pairs:
 *     movq   N(%rdi,%rdx),%rax ; movnti %rax,N(%rsi,%rdx)
 * for N = 0, 8, 16, 24.  movnti keeps the destination page out of the
 * cache.  Loop control and the closing sfence are on elided lines.
 */
184 movq
(%rdi
,%rdx
),%rax
185 movnti
%rax
,(%rsi
,%rdx
)
186 movq
8(%rdi
,%rdx
),%rax
187 movnti
%rax
,8(%rsi
,%rdx
)
188 movq
16(%rdi
,%rdx
),%rax
189 movnti
%rax
,16(%rsi
,%rdx
)
190 movq
24(%rdi
,%rdx
),%rax
191 movnti
%rax
,24(%rsi
,%rdx
)
197 /* fillw(pat, base, cnt) */
198 /* %rdi,%rsi, %rdx */
208 /*****************************************************************************/
209 /* copyout and fubyte family */
210 /*****************************************************************************/
212 * Access user memory from inside the kernel. These routines should be
213 * the only places that do this.
215 * These routines set curpcb->onfault for the time they execute. When a
216 * protection violation occurs inside the functions, the trap handler
217 * returns to *curpcb->onfault instead of the function.
221 * copyout(from_kernel, to_user, len) - MP SAFE
/*
 * copyout prologue: arm the fault handler by storing the address of
 * copyout_fault into curthread's pcb_onfault, then skip all work when
 * len (%rdx) is zero.  Reassembled:
 *     movq  PCPU(curthread),%rax
 *     movq  $copyout_fault,PCB_ONFAULT(%rax)
 *     testq %rdx,%rdx
 */
225 movq PCPU
(curthread
),%rax
226 movq $copyout_fault
,PCB_ONFAULT
(%rax
)
227 testq
%rdx
,%rdx
/* anything to do? */
231 * Check explicitly for non-user addresses. If 486 write protection
232 * is being used, this check is essential because we are in kernel
233 * mode so the h/w does not provide any protection against writing
238 * First, prevent address wrapping.
244 * XXX STOP USING VM_MAXUSER_ADDRESS.
245 * It is an end address, not a max, so every time it is used correctly it
246 * looks like there is an off by one error, and of course it caused an off
247 * by one error in several places.
/* bound the destination against the end of user space; the compare
 * and branch lines are elided */
249 movq $VM_MAXUSER_ADDRESS
,%rcx
254 /* bcopy(%rsi, %rdi, %rdx) */
/* success epilogue: clear pcb_onfault using %rax (presumably zeroed
 * on an elided line so it doubles as the 0 return value -- TODO
 * confirm) */
268 movq PCPU
(curthread
),%rdx
269 movq
%rax
,PCB_ONFAULT
(%rdx
)
/* copyout_fault handler: clear pcb_onfault; the EFAULT return value
 * load is on an elided line */
274 movq PCPU
(curthread
),%rdx
275 movq $
0,PCB_ONFAULT
(%rdx
)
280 * copyin(from_user, to_kernel, len) - MP SAFE
/*
 * copyin prologue, mirror image of copyout: arm pcb_onfault with
 * copyin_fault, then skip all work when len (%rdx) is zero.
 */
284 movq PCPU
(curthread
),%rax
285 movq $copyin_fault
,PCB_ONFAULT
(%rax
)
286 testq
%rdx
,%rdx
/* anything to do? */
290 * make sure address is valid
/* bound the source against the end of user space (compare/branch
 * lines elided) */
295 movq $VM_MAXUSER_ADDRESS
,%rcx
/* copy 8 bytes at a time, then the remaining 0-7 bytes; the rep movs
 * lines themselves are elided */
302 shrq $
3,%rcx
/* copy longword-wise */
307 andb $
7,%cl
/* copy remaining bytes */
/* success epilogue: clear pcb_onfault via %rax (presumably zeroed on
 * an elided line -- TODO confirm) */
313 movq PCPU
(curthread
),%rdx
314 movq
%rax
,PCB_ONFAULT
(%rdx
)
/* copyin_fault handler: clear pcb_onfault; EFAULT load elided */
319 movq PCPU
(curthread
),%rdx
320 movq $
0,PCB_ONFAULT
(%rdx
)
325 * casuword32. Compare and set user integer. Returns -1 or the current value.
326 * dst = %rdi, old = %rsi, new = %rdx
/* arm the shared fusufault handler before touching user memory */
329 movq PCPU
(curthread
),%rcx
330 movq $fusufault
,PCB_ONFAULT
(%rcx
)
/* a 4-byte access needs 4 bytes of headroom below the user limit */
332 movq $VM_MAXUSER_ADDRESS-
4,%rax
333 cmpq
%rax
,%rdi
/* verify address is valid */
/* cmpxchgl compares %eax (old) with (%rdi); the lock prefix, if any,
 * is on an elided line -- TODO confirm */
336 movl
%esi
,%eax
/* old */
340 cmpxchgl
%edx
,(%rdi
) /* new = %edx */
343 * The old value is in %eax. If the store succeeded it will be the
344 * value we expected (old) from before the store, otherwise it will
345 * be the current value.
/* disarm the fault handler before returning */
348 movq PCPU
(curthread
),%rcx
349 movq $
0,PCB_ONFAULT
(%rcx
)
353 * casuword. Compare and set user word. Returns -1 or the current value.
354 * dst = %rdi, old = %rsi, new = %rdx
357 movq PCPU
(curthread
),%rcx
358 movq $fusufault
,PCB_ONFAULT
(%rcx
)
/* NOTE(review): the cmpxchgq below accesses 8 bytes, yet the limit
 * check only reserves 4 bytes of headroom (VM_MAXUSER_ADDRESS-4);
 * this looks like it should be -8 -- confirm against upstream. */
360 movq $VM_MAXUSER_ADDRESS-
4,%rax
361 cmpq
%rax
,%rdi
/* verify address is valid */
364 movq
%rsi
,%rax
/* old */
368 cmpxchgq
%rdx
,(%rdi
) /* new = %rdx */
371 * The old value is in %rax. If the store succeeded it will be the
372 * value we expected (old) from before the store, otherwise it will
373 * be the current value.
/* NOTE(review): the juxtaposition of re-arming ($fusufault) and then
 * clearing ($0) pcb_onfault below looks like extraction garbling of
 * two separate code paths -- confirm against the full source. */
376 movq PCPU
(curthread
),%rcx
377 movq $fusufault
,PCB_ONFAULT
(%rcx
)
378 movq $
0,PCB_ONFAULT
(%rcx
)
382 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
383 * byte from user memory. All these functions are MPSAFE.
/*
 * fuword (8-byte fetch): arm the shared fusufault handler, then check
 * that the address leaves 8 bytes of headroom below the user limit.
 * The actual user load and return are on elided lines.
 */
389 movq PCPU
(curthread
),%rcx
390 movq $fusufault
,PCB_ONFAULT
(%rcx
)
392 movq $VM_MAXUSER_ADDRESS-
8,%rax
393 cmpq
%rax
,%rdi
/* verify address is valid */
/* disarm the fault handler on the success path */
397 movq $
0,PCB_ONFAULT
(%rcx
)
/* fuword32 (4-byte fetch): same pattern with 4 bytes of headroom */
401 movq PCPU
(curthread
),%rcx
402 movq $fusufault
,PCB_ONFAULT
(%rcx
)
404 movq $VM_MAXUSER_ADDRESS-
4,%rax
405 cmpq
%rax
,%rdi
/* verify address is valid */
409 movq $
0,PCB_ONFAULT
(%rcx
)
413 * fuswintr() and suswintr() are specialized variants of fuword16() and
414 * suword16(), respectively. They are called from the profiling code,
415 * potentially at interrupt time. If they fail, that's okay; good things
416 * will happen later. They always fail for now, until the trap code is
417 * able to deal with this.
/* fuword16 (2-byte fetch): same pattern with 2 bytes of headroom;
 * the compare/branch line is elided */
425 movq PCPU
(curthread
),%rcx
426 movq $fusufault
,PCB_ONFAULT
(%rcx
)
428 movq $VM_MAXUSER_ADDRESS-
2,%rax
433 movq $
0,PCB_ONFAULT
(%rcx
)
/* fubyte (1-byte fetch): same pattern with 1 byte of headroom */
437 movq PCPU
(curthread
),%rcx
438 movq $fusufault
,PCB_ONFAULT
(%rcx
)
440 movq $VM_MAXUSER_ADDRESS-
1,%rax
445 movq $
0,PCB_ONFAULT
(%rcx
)
/* fusufault handler: clear pcb_onfault via %rax (presumably zeroed on
 * an elided line, then decremented to return -1 -- TODO confirm) */
450 movq PCPU
(curthread
),%rcx
452 movq
%rax
,PCB_ONFAULT
(%rcx
)
457 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
458 * user memory. All these functions are MPSAFE.
459 * addr = %rdi, value = %rsi
/*
 * suword (8-byte store): arm the shared fusufault handler, then check
 * that the address leaves 8 bytes of headroom below the user limit.
 * The actual user store and return are on elided lines.
 */
463 movq PCPU
(curthread
),%rcx
464 movq $fusufault
,PCB_ONFAULT
(%rcx
)
466 movq $VM_MAXUSER_ADDRESS-
8,%rax
467 cmpq
%rax
,%rdi
/* verify address validity */
/* success path: clear pcb_onfault via %rax (presumably zeroed on an
 * elided line -- TODO confirm) */
472 movq PCPU
(curthread
),%rcx
473 movq
%rax
,PCB_ONFAULT
(%rcx
)
/* suword32 (4-byte store): same pattern with 4 bytes of headroom */
477 movq PCPU
(curthread
),%rcx
478 movq $fusufault
,PCB_ONFAULT
(%rcx
)
480 movq $VM_MAXUSER_ADDRESS-
4,%rax
481 cmpq
%rax
,%rdi
/* verify address validity */
486 movq PCPU
(curthread
),%rcx
487 movq
%rax
,PCB_ONFAULT
(%rcx
)
/* suword16 (2-byte store): same pattern with 2 bytes of headroom */
491 movq PCPU
(curthread
),%rcx
492 movq $fusufault
,PCB_ONFAULT
(%rcx
)
494 movq $VM_MAXUSER_ADDRESS-
2,%rax
495 cmpq
%rax
,%rdi
/* verify address validity */
500 movq PCPU
(curthread
),%rcx
/* restore trashed register */
501 movq
%rax
,PCB_ONFAULT
(%rcx
)
/* subyte (1-byte store): same pattern with 1 byte of headroom */
505 movq PCPU
(curthread
),%rcx
506 movq $fusufault
,PCB_ONFAULT
(%rcx
)
508 movq $VM_MAXUSER_ADDRESS-
1,%rax
509 cmpq
%rax
,%rdi
/* verify address validity */
515 movq PCPU
(curthread
),%rcx
/* restore trashed register */
516 movq
%rax
,PCB_ONFAULT
(%rcx
)
520 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
521 * %rdi, %rsi, %rdx, %rcx
523 * copy a string from from to to, stop when a 0 character is reached.
524 * return ENAMETOOLONG if string is longer than maxlen, and
525 * EFAULT on protection violations. If lencopied is non-zero,
526 * return the actual length in *lencopied.
/*
 * Register roles for the (elided) copy loop: %r8 = maxlen,
 * %r9 = lencopied pointer, and %rdi/%rsi are swapped so the string
 * instructions can read via %rsi and write via %rdi.
 */
529 movq
%rdx
,%r8 /* %r8 = maxlen */
530 movq
%rcx
,%r9 /* %r9 = *len */
/* NOTE(review): per the entry convention above, %rdi = from and
 * %rsi = to on entry, so after this xchg %rsi holds `from' and %rdi
 * holds `to'; the trailing comment appears inverted -- confirm. */
531 xchgq
%rdi
,%rsi
/* %rdi = from, %rsi = to */
/* arm the string-copy fault handler before touching user memory */
532 movq PCPU
(curthread
),%rcx
533 movq $cpystrflt
,PCB_ONFAULT
(%rcx
)
535 movq $VM_MAXUSER_ADDRESS
,%rax
537 /* make sure 'from' is within bounds */
541 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
559 /* Success -- 0 byte reached */
564 /* rdx is zero - return ENAMETOOLONG or EFAULT */
/* maxlen exhausted: EFAULT if we stopped at the user-space limit,
 * otherwise ENAMETOOLONG (compare/branch lines elided) */
565 movq $VM_MAXUSER_ADDRESS
,%rax
569 movq $ENAMETOOLONG
,%rax
576 /* set *lencopied and return %eax */
/* common exit: disarm the fault handler */
577 movq PCPU
(curthread
),%rcx
578 movq $
0,PCB_ONFAULT
(%rcx
)
589 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
590 * %rdi, %rsi, %rdx, %rcx
/* kernel-to-kernel variant: no fault handler needed; only the
 * maxlen setup and the error path survive extraction */
593 movq
%rdx
,%r8 /* %r8 = maxlen */
606 /* Success -- 0 byte reached */
611 /* rdx is zero -- return ENAMETOOLONG */
612 movq $ENAMETOOLONG
,%rax
618 /* set *lencopied and return %rax */
625 * Handling of special amd64 registers and descriptor tables etc
628 /* void lgdt(struct region_descriptor *rdp); */
630 /* reload the descriptor table */
633 /* flush the prefetch q */
/* reload the data-segment selectors after the new GDT is live; on
 * amd64 the visible %fs selector does not carry the 64-bit base,
 * which must be set via wrmsr (MSR_FSBASE) as the comment warns */
640 movl
%eax
,%fs /* Beware, use wrmsr to set 64 bit base */
644 /* reload code selector by turning return into intersegmental return */
651 /*****************************************************************************/
652 /* setjump, longjump */
653 /*****************************************************************************/
/*
 * setjmp(%rdi = jmp_buf): save the callee-saved registers, the stack
 * pointer, and the return address into the 64-byte buffer at
 * offsets 0..56, then return 0.  Each fragment group below is one
 * instruction split by extraction (e.g. movq %rbx,0(%rdi)).
 */
656 movq
%rbx
,0(%rdi
) /* save rbx */
657 movq
%rsp
,8(%rdi
) /* save rsp */
658 movq
%rbp
,16(%rdi
) /* save rbp */
659 movq
%r12,24(%rdi
) /* save r12 */
660 movq
%r13,32(%rdi
) /* save r13 */
661 movq
%r14,40(%rdi
) /* save r14 */
662 movq
%r15,48(%rdi
) /* save r15 */
663 movq
0(%rsp
),%rdx
/* get rta */
664 movq
%rdx
,56(%rdi
) /* save rip */
665 xorl
%eax
,%eax
/* return(0); */
/*
 * longjmp(%rdi = jmp_buf): restore the registers saved by setjmp at
 * the same offsets, overwrite the return address on the (restored)
 * stack with the saved rip, and return non-zero.
 */
669 movq
0(%rdi
),%rbx
/* restore rbx */
670 movq
8(%rdi
),%rsp
/* restore rsp */
671 movq
16(%rdi
),%rbp
/* restore rbp */
672 movq
24(%rdi
),%r12 /* restore r12 */
673 movq
32(%rdi
),%r13 /* restore r13 */
674 movq
40(%rdi
),%r14 /* restore r14 */
675 movq
48(%rdi
),%r15 /* restore r15 */
676 movq
56(%rdi
),%rdx
/* get rta */
677 movq
%rdx
,0(%rsp
) /* put in return frame */
/* NOTE(review): the visible code zeroes %eax although the comment
 * says return(1); the increment that produces 1 must be on an elided
 * line -- confirm against the full source. */
678 xorl
%eax
,%eax
/* return(1); */
683 * Support for BB-profiling (gcc -a). The kernbb program will extract
684 * the data from the kernel.
/* Stub entry point for gcc basic-block profiling; the function body
 * (marking the bb struct initialized) is on elided lines. */
694 NON_GPROF_ENTRY
(__bb_init_func
)