/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bzero(ptr:%rdi, bytes:%rsi)
 *
 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
 * a %xmm0 loop on a modern intel cpu.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(bzero)
	movq	%rsi,%rcx
	xorl	%eax,%eax
	shrq	$3,%rcx
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx
	rep
	stosb
	ret
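
/*
 * Illustrative only: a rough C equivalent of the word/byte split above,
 * assuming an 8-byte word size. A sketch, not the kernel's implementation.
 *
 *	void
 *	bzero_sketch(void *ptr, size_t bytes)
 *	{
 *		uint64_t *wp = ptr;
 *		uint8_t *bp;
 *		size_t i;
 *
 *		for (i = 0; i < (bytes >> 3); ++i)	// rep stosq
 *			*wp++ = 0;
 *		bp = (uint8_t *)wp;
 *		for (i = 0; i < (bytes & 7); ++i)	// rep stosb
 *			*bp++ = 0;
 *	}
 */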

/*
 * pagezero(ptr:%rdi)
 *
 * Using rep stosq is nearly as fast as using %xmm0 on a modern intel cpu,
 * and about 70% faster than a %rax loop.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx
	xorl	%eax,%eax
	cld
	rep
	stosq
	ret

/*
 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	ret
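
/*
 * Illustrative only: the return-value convention above, as a C sketch.
 * bcmp() returns 0 when the ranges match and 1 otherwise; the setne/movsbl
 * pair materializes the ZF flag left by the final compare.
 *
 *	int
 *	bcmp_sketch(const void *a, const void *b, size_t len)
 *	{
 *		const uint8_t *pa = a, *pb = b;
 *		size_t i;
 *
 *		for (i = 0; i < len; ++i)
 *			if (pa[i] != pb[i])
 *				return (1);	// setne %al -> 1
 *		return (0);			// setne %al -> 0
 *	}
 */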

/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	ALIGN_TEXT
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std
	rep
	movsb
	movq	%rdx,%rcx			/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld
	ret
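
/*
 * Illustrative only: the overlap test above relies on unsigned wraparound.
 * "dst - src < len" (as unsigned) is true exactly when src < dst < src+len,
 * i.e. when a forward copy would clobber source bytes not yet read.
 *
 *	int
 *	must_copy_backwards(const void *src, const void *dst, size_t len)
 *	{
 *		// wraps around when dst < src, yielding a huge value >= len
 *		return ((uintptr_t)dst - (uintptr_t)src < len);
 *	}
 */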

ENTRY(reset_dbregs)
	movq	$0x200,%rax		/* the manual says that bit 10 must be set to 1 */
	movq	%rax,%dr7		/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6
	ret

/*
 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdi,%r8
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	movq	%r8,%rax
	ret
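
/*
 * Illustrative only: unlike bcopy, memcpy returns its dst argument
 * (preserved in %r8 above) and makes no provision for overlap.
 *
 *	void *
 *	memcpy_sketch(void *dst, const void *src, size_t bytes)
 *	{
 *		uint8_t *d = dst;
 *		const uint8_t *s = src;
 *
 *		while (bytes--)		// forward copy only; overlap undefined
 *			*d++ = *s++;
 *		return (dst);		// movq %r8,%rax
 *	}
 */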

/* fillw(pat, base, cnt) */
/*        %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax
	movq	%rsi,%rdi
	movq	%rdx,%rcx
	cld
	rep
	stosw
	ret

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel.  These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute.  When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
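
/*
 * Illustrative only: the onfault protocol in C-like pseudocode.  Field
 * names follow the assembly below (TD_PCB, PCB_ONFAULT, PCB_ONFAULT_SP);
 * the trap-handler side is paraphrased, not quoted from the trap code.
 *
 *	int
 *	copy_with_onfault(void)
 *	{
 *		struct pcb *pcb = curthread->td_pcb;
 *
 *		pcb->pcb_onfault = &&fault;	// arm the recovery point
 *		pcb->pcb_onfault_sp = rsp;	// so the trap handler can
 *						// unwind to this frame
 *		// ... touch user memory; a protection violation makes the
 *		// trap handler resume at 'fault' with this stack ...
 *		pcb->pcb_onfault = NULL;	// disarm
 *		return (0);
 *	fault:
 *		pcb->pcb_onfault = NULL;
 *		return (EFAULT);
 *	}
 */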

/*
 * std_copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
	/*
	 * XXX STOP USING VM_MAX_USER_ADDRESS.
	 * It is an end address, not a max, so every time it is used correctly it
	 * looks like there is an off by one error, and of course it caused an off
	 * by one error in several places.
	 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl
	rep
	movsb

done_copyout:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
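
/*
 * Illustrative only: a typical caller, e.g. a syscall handing a struct
 * back to userspace.  'sb', 'uap', and kern_stat() are hypothetical.
 *
 *	struct stat sb;
 *	int error;
 *
 *	kern_stat(path, &sb);			// hypothetical producer
 *	error = copyout(&sb, uap->ub, sizeof(sb));
 *	if (error)				// EFAULT from copyout_fault
 *		return (error);
 */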

/*
 * std_copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret

/*
 * casuword32.  Compare and set user integer.  Returns -1 or the current value.
 *              dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
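
/*
 * Illustrative only: how a caller distinguishes success from failure.
 * 'uaddr' and 'new' are hypothetical.
 *
 *	uint32_t old, cur;
 *
 *	for (;;) {
 *		old = fuword32(uaddr);
 *		cur = casuword32(uaddr, old, new);
 *		if (cur == old)
 *			break;	// swap took effect: *uaddr is now 'new'
 *		// otherwise 'cur' is the value now in memory; retry
 *	}
 */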

/*
 * casuword.  Compare and set user word.  Returns -1 or the current value.
 *            dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 *	addr = %rdi
 */
ALTENTRY(fuword64)
ENTRY(std_fuword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret
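
/*
 * Illustrative only: the bound check above guards the *last* byte of the
 * access, so an n-byte load at addr is accepted only when
 * addr <= VM_MAX_USER_ADDRESS - n.  As a C sketch:
 *
 *	int
 *	user_range_ok(uintptr_t addr, size_t n)
 *	{
 *		return (addr <= VM_MAX_USER_ADDRESS - n);
 *	}
 */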

/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax
	ret

ENTRY(fuword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret
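
/*
 * Illustrative only: fusufault clears the onfault hook and returns -1
 * (the xorl/decq pair builds -1 without a separate immediate load).
 * A fuword() caller therefore cannot distinguish a fault from a
 * legitimate -1 stored in user memory:
 *
 *	long v = fuword(uaddr);
 *	if (v == -1) {
 *		// either *uaddr really contains -1, or the access faulted
 *	}
 */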

/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 *
 *	addr = %rdi, value = %rsi
 *
 * Write a long
 */
ALTENTRY(suword64)
ENTRY(std_suword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * Write an int
 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(suword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' to 'to', stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations.  If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8
	movq	%r8,(%r9)
1:
	ret
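
/*
 * Illustrative only: typical use when fetching a path from userspace.
 * 'upath' is a hypothetical user pointer.
 *
 *	char kpath[MAXPATHLEN];
 *	size_t done;
 *	int error;
 *
 *	error = copyinstr(upath, kpath, sizeof(kpath), &done);
 *	// 0:            kpath is NUL-terminated, done counts the NUL
 *	// ENAMETOOLONG: no NUL found within sizeof(kpath) bytes
 *	// EFAULT:       bad user address (via cpystrflt)
 */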

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:
	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8
	movq	%r8,(%rcx)
7:
	ret

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
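
/*
 * Illustrative only: the kernel-internal setjmp/longjmp pattern (the
 * kernel debugger is the classic client).  setjmp returns 0 when called
 * directly and 1 when control re-enters via longjmp; the buffer type
 * shown here is hypothetical (eight quadwords, per the offsets above).
 *
 *	static uint64_t jb[8];
 *
 *	if (setjmp(jb) == 0) {
 *		run_command();		// may call longjmp(jb) on error
 *	} else {
 *		// resumed here: callee-saved registers and %rsp restored
 *	}
 */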

/*
 * Support for reading MSRs in the safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed to by %ecx.  Returns
				   hi 32 bits in %edx, lo 32 bits in %eax */
	salq	$32,%rdx	/* shift the hi half into place */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret
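
/*
 * Illustrative only: the %edx:%eax recombination above, in C.  Note the
 * "movl %eax,%eax" idiom: a 32-bit mov zero-extends into the full 64-bit
 * register, clearing any stale upper bits left by rdmsr.
 *
 *	uint64_t
 *	msr_join(uint32_t lo, uint32_t hi)
 *	{
 *		return (((uint64_t)hi << 32) | lo);
 *	}
 */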

/*
 * Support for writing MSRs in the safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax
	sarq	$32,%rsi
	movl	%esi,%edx
	wrmsr			/* Write MSR pointed to by %ecx.  Accepts
				   hi 32 bits in %edx, lo 32 bits in %eax. */
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret

/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl bbhead
bbhead:
	.quad 0

	.text
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)
	movq	bbhead,%rax
	movq	%rax,32(%rdi)
	movq	%rdi,bbhead
	NON_GPROF_RET