acpi: Narrow workaround for broken interrupt settings
[dragonfly.git] / sys / platform / pc64 / x86_64 / support.s
blob881cddc3cb0d5129b269fa19d8fd2ccbd54eb33d
1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * Copyright (c) 2003 Peter Wemm.
4 * Copyright (c) 2008 The DragonFly Project.
5 * Copyright (c) 2008-2020 The DragonFly Project.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
35 #include <machine/asmacros.h>
36 #include <machine/asm_mjgmacros.h>
37 #include <machine/pmap.h>
39 #include "assym.s"
41 ALIGN_DATA
43 .text
46 * bzero(ptr:%rdi, bytes:%rsi)
48 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
49 * a %xmm0 loop on a modern intel cpu.
51 * Do not use non-temporal instructions here as we do not know the caller's
52 * intent.
/*
 * void bzero(ptr:%rdi, bytes:%rsi)
 *
 * Zero a buffer using the shared MEMSET macro (machine/asm_mjgmacros.h).
 * MEMSET expects the fill pattern in %r10 and the byte count in %rdx;
 * end=ret makes the macro emit the function's return itself.
 */
54 ENTRY(bzero)
55 subq %r10,%r10 /* %r10 = 0: eight zero fill bytes for MEMSET */
56 movq %rsi,%rdx /* MEMSET takes the count in %rdx */
57 MEMSET erms=0 end=ret /* no ERMS path; macro supplies the ret */
58 END(bzero)
/* _bzero: weak alias so callers can take a plain function pointer */
60 .weak _bzero
61 .equ _bzero, bzero
64 * void *memset(ptr:%rdi, char:%rsi, bytes:%rdx)
66 * Same as bzero except we load the char into all byte
67 * positions of %rax. Returns original (ptr).
/*
 * void *memset(ptr:%rdi, char:%rsi, bytes:%rdx)
 *
 * Same mechanism as bzero except the fill byte is replicated into every
 * byte lane of the pattern register %r10. MEMSET returns the original
 * pointer (memset's contract) and emits the ret (end=ret).
 */
69 ENTRY(memset)
70 movzbq %sil,%r8 /* zero-extend the fill byte */
71 movabs $0x0101010101010101,%r10
72 imulq %r8,%r10 /* replicate byte into all 8 lanes of %r10 */
73 MEMSET erms=0 end=ret
74 END(memset)
/* _memset: weak alias for function-pointer use */
76 .weak _memset
77 .equ _memset, memset
80 * pagezero(ptr:%rdi)
82 * Modern intel and AMD cpus do a good job with rep stosq on page-sized
83 * blocks. The cross-point on intel is at the 256 byte mark and on AMD
84 * it is around the 1024 byte mark. With large counts, rep stosq will
85 * internally use non-temporal instructions and a cache sync at the end.
/*
 * pagezero(ptr:%rdi)
 *
 * Zero one page. The active (#if 1) variant uses rep stosq, which modern
 * CPUs accelerate for page-sized counts; the #else variant is a retired
 * 16-byte-per-iteration store loop kept for reference.
 */
87 #if 1
89 ENTRY(pagezero)
90 movq $PAGE_SIZE>>3,%rcx /* quadword count for rep stosq */
91 xorl %eax,%eax /* fill value 0 */
92 rep
93 stosq
94 ret
95 END(pagezero)
97 #else
/*
 * Dead code (compiled out). NOTE(review): as shown in this view the loop
 * label (1:) and the final ret are absent — verify against the repository
 * before ever re-enabling this variant.
 */
99 ENTRY(pagezero)
100 addq $4096,%rdi /* point %rdi at end of page */
101 movq $-4096,%rax /* negative offset counts up toward 0 */
102 ALIGN_TEXT
104 movq $0,(%rdi,%rax,1)
105 movq $0,8(%rdi,%rax,1)
106 addq $16,%rax
107 jne 1b /* loop until offset reaches 0 */
109 END(pagezero)
111 #endif
114 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
116 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * bcopy's (src, dst) argument order is the reverse of memmove's, so swap
 * %rdi/%rsi and share the MEMMOVE macro. overlap=1 handles overlapping
 * regions; end=ret makes the macro emit the return.
 */
118 ENTRY(bcopy)
119 xchgq %rsi,%rdi /* convert bcopy order to (dst, src) */
120 MEMMOVE erms=0 overlap=1 end=ret
121 END(bcopy)
/* _bcopy: weak alias for use where a bcopy function pointer is needed */
126 .weak _bcopy
127 .equ _bcopy, bcopy
130 * memmove(dst:%rdi, src:%rsi, cnt:%rdx)
131 * (same as bcopy but without the xchgq, and must return (dst)).
133 * NOTE: gcc builtin backs-off to memmove() call
134 * NOTE: returns dst
/*
 * void *memmove(dst:%rdi, src:%rsi, cnt:%rdx)
 *
 * Same as bcopy but already in (dst, src) order, and must return dst in
 * %rax per the C contract (gcc's builtin may fall back to this call).
 */
136 ENTRY(memmove)
137 movq %rdi,%rax /* return value = dst */
138 MEMMOVE erms=0 overlap=1 end=ret
139 END(memmove)
/* _memmove: weak alias for function-pointer use */
141 .weak _memmove
142 .equ _memmove, memmove
/*
 * reset_dbregs(void)
 *
 * Clear all hardware debug registers: disable breakpoints via %dr7 first,
 * then zero the address registers %dr0-%dr3 and status register %dr6.
 * NOTE(review): the ret before END is absent from this view — verify
 * against the repository.
 */
144 ENTRY(reset_dbregs)
145 movq $0x200,%rax /* the manual says that bit 10 must be set to 1 */
146 movq %rax,%dr7 /* disable all breakpoints first */
147 movq $0,%rax
148 movq %rax,%dr0
149 movq %rax,%dr1
150 movq %rax,%dr2
151 movq %rax,%dr3
152 movq %rax,%dr6
154 END(reset_dbregs)
157 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
159 * NOTE: memcpy does not support overlapping copies
160 * NOTE: returns dst
/*
 * void *memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Non-overlapping copy (overlap=0); returns dst per the C contract.
 */
162 ENTRY(memcpy)
163 movq %rdi,%rax /* return value = dst */
164 MEMMOVE erms=0 overlap=0 end=ret
165 END(memcpy)
/* _memcpy: weak alias for function-pointer use */
167 .weak _memcpy
168 .equ _memcpy, memcpy
/* fillw(pat, base, cnt) */
/* %rdi,%rsi, %rdx */
/*
 * Fill cnt 16-bit words at base with pattern pat (low 16 bits of %rdi).
 * NOTE(review): this view appears to be missing lines — the 'rep' prefix
 * before stosw and the final ret are not shown; verify against the
 * repository.
 */
172 ENTRY(fillw)
173 movq %rdi,%rax /* %ax = fill pattern for stosw */
174 movq %rsi,%rdi /* %rdi = destination */
175 movq %rdx,%rcx /* %rcx = word count */
177 stosw
179 END(fillw)
181 /*****************************************************************************/
182 /* copyout and fubyte family */
183 /*****************************************************************************/
185 * Access user memory from inside the kernel. These routines should be
186 * the only places that do this.
188 * These routines set curpcb->onfault for the time they execute. When a
189 * protection violation occurs inside the functions, the trap handler
190 * returns to *curpcb->onfault instead of the function.
194 * uint64_t:%rax kreadmem64(addr:%rdi)
196 * Read kernel or user memory with fault protection.
/*
 * uint64_t kreadmem64(addr:%rdi)
 *
 * Read a 64-bit word from kernel or user memory with fault protection:
 * PCB_ONFAULT is armed so a protection fault vectors to kreadmem64fault,
 * which returns (uint64_t)-1. SMAP_OPEN/CLOSE bracket the access so user
 * addresses are readable while SMAP is enabled.
 * NOTE(review): the ret instructions on both paths are absent from this
 * view — verify against the repository.
 */
198 ENTRY(kreadmem64)
199 SMAP_OPEN
200 movq PCPU(curthread),%rcx
201 movq TD_PCB(%rcx), %rcx
202 movq $kreadmem64fault,PCB_ONFAULT(%rcx) /* arm fault handler */
203 movq %rsp,PCB_ONFAULT_SP(%rcx)
204 movq (%rdi),%rax /* the protected load */
205 movq $0,PCB_ONFAULT(%rcx) /* disarm */
206 SMAP_CLOSE
/* Fault path: disarm the handler and return -1 */
209 kreadmem64fault:
210 SMAP_CLOSE
211 movq PCPU(curthread),%rcx
212 xorl %eax,%eax
213 movq TD_PCB(%rcx),%rcx
214 movq %rax,PCB_ONFAULT(%rcx)
215 decq %rax /* %rax = -1 */
217 END(kreadmem64)
/*
 * Tail hook handed to MEMMOVE (end=COPYOUT_END) so the copy macro's exit
 * jumps to std_copyout's common completion code instead of returning.
 */
219 .macro COPYOUT_END
220 jmp done_copyout
222 .endm
225 * std_copyout(from_kernel, to_user, len) - MP SAFE
226 * %rdi, %rsi, %rdx
/*
 * int std_copyout(from_kernel:%rdi, to_user:%rsi, len:%rdx) - MP SAFE
 *
 * Copy len bytes from kernel to user space. Returns 0 on success or
 * EFAULT if the destination range is invalid or a fault occurs; the
 * PCB_ONFAULT hook vectors faults to copyout_fault.
 * NOTE(review): the ret instructions after done_copyout and copyout_fault
 * are absent from this view — verify against the repository.
 */
228 ENTRY(std_copyout)
229 SMAP_OPEN
230 movq PCPU(curthread),%rax
231 movq TD_PCB(%rax), %rax
232 movq $copyout_fault,PCB_ONFAULT(%rax) /* arm fault handler */
233 movq %rsp,PCB_ONFAULT_SP(%rax)
234 testq %rdx,%rdx /* anything to do? */
235 jz done_copyout
/*
238 * Check explicitly for non-user addresses. If 486 write protection
239 * is being used, this check is essential because we are in kernel
240 * mode so the h/w does not provide any protection against writing
241 * kernel addresses.
 */
/*
245 * First, prevent address wrapping.
 */
247 movq %rsi,%rax
248 addq %rdx,%rax /* %rax = end of user destination */
249 jc copyout_fault /* wrapped -> fault */
/*
251 * XXX STOP USING VM_MAX_USER_ADDRESS.
252 * It is an end address, not a max, so every time it is used correctly it
253 * looks like there is an off by one error, and of course it caused an off
254 * by one error in several places.
 */
256 movq $VM_MAX_USER_ADDRESS,%rcx
257 cmpq %rcx,%rax
258 ja copyout_fault /* end beyond user space -> fault */
260 xchgq %rdi,%rsi /* MEMMOVE wants (dst, src) order */
261 MEMMOVE erms=0 overlap=0 end=COPYOUT_END
/* Common success exit: disarm handler, return 0 */
263 done_copyout:
264 SMAP_CLOSE
265 xorl %eax,%eax
266 movq PCPU(curthread),%rdx
267 movq TD_PCB(%rdx), %rdx
268 movq %rax,PCB_ONFAULT(%rdx)
/* Fault exit: disarm handler, return EFAULT */
271 ALIGN_TEXT
272 copyout_fault:
273 SMAP_CLOSE
274 movq PCPU(curthread),%rdx
275 movq TD_PCB(%rdx), %rdx
276 movq $0,PCB_ONFAULT(%rdx)
277 movq $EFAULT,%rax
279 END(std_copyout)
/*
 * Tail hook handed to MEMMOVE (end=COPYIN_END) so the copy macro's exit
 * jumps to std_copyin's common completion code instead of returning.
 */
281 .macro COPYIN_END
282 jmp done_copyin
284 .endm
287 * std_copyin(from_user, to_kernel, len) - MP SAFE
288 * %rdi, %rsi, %rdx
/*
 * int std_copyin(from_user:%rdi, to_kernel:%rsi, len:%rdx) - MP SAFE
 *
 * Copy len bytes from user to kernel space. Mirror image of std_copyout:
 * returns 0 on success, EFAULT on an invalid source range or fault.
 * NOTE(review): the ret instructions after done_copyin and copyin_fault
 * are absent from this view — verify against the repository.
 */
290 ENTRY(std_copyin)
291 SMAP_OPEN
292 movq PCPU(curthread),%rax
293 movq TD_PCB(%rax), %rax
294 movq $copyin_fault,PCB_ONFAULT(%rax) /* arm fault handler */
295 movq %rsp,PCB_ONFAULT_SP(%rax)
296 testq %rdx,%rdx /* anything to do? */
297 jz done_copyin
/*
300 * make sure address is valid
 */
302 movq %rdi,%rax
303 addq %rdx,%rax /* %rax = end of user source */
304 jc copyin_fault /* wrapped -> fault */
305 movq $VM_MAX_USER_ADDRESS,%rcx
306 cmpq %rcx,%rax
307 ja copyin_fault /* end beyond user space -> fault */
309 xchgq %rdi,%rsi /* MEMMOVE wants (dst, src) order */
310 MEMMOVE erms=0 overlap=0 end=COPYIN_END
/* Common success exit: disarm handler, return 0 */
312 done_copyin:
313 SMAP_CLOSE
314 xorl %eax,%eax
315 movq PCPU(curthread),%rdx
316 movq TD_PCB(%rdx), %rdx
317 movq %rax,PCB_ONFAULT(%rdx)
/* Fault exit: disarm handler, return EFAULT */
320 ALIGN_TEXT
321 copyin_fault:
322 SMAP_CLOSE
323 movq PCPU(curthread),%rdx
324 movq TD_PCB(%rdx), %rdx
325 movq $0,PCB_ONFAULT(%rdx)
326 movq $EFAULT,%rax
328 END(std_copyin)
331 * casu32 - Compare and set user integer. Returns -1 or the current value.
332 * dst = %rdi, old = %rsi, new = %rdx
/*
 * casu32 - Compare and set user integer. Returns -1 or the current value.
 * dst = %rdi, old = %rsi, new = %rdx
 *
 * Faults vector to the shared fusufault handler (defined with std_fubyte),
 * which returns -1. NOTE(review): the ret before END is absent from this
 * view — verify against the repository.
 */
334 ENTRY(casu32)
335 SMAP_OPEN
336 movq PCPU(curthread),%rcx
337 movq TD_PCB(%rcx), %rcx
338 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
339 movq %rsp,PCB_ONFAULT_SP(%rcx)
341 movq $VM_MAX_USER_ADDRESS-4,%rax
342 cmpq %rax,%rdi /* verify address is valid */
343 ja fusufault
345 movl %esi,%eax /* old */
346 lock
347 cmpxchgl %edx,(%rdi) /* new = %edx */
/*
350 * The old value is in %eax. If the store succeeded it will be the
351 * value we expected (old) from before the store, otherwise it will
352 * be the current value.
 */
355 movq PCPU(curthread),%rcx
356 movq TD_PCB(%rcx), %rcx
357 movq $0,PCB_ONFAULT(%rcx) /* disarm */
358 SMAP_CLOSE
360 END(casu32)
363 * swapu32 - Swap int in user space. ptr = %rdi, val = %rsi
/*
 * swapu32 - Swap int in user space. ptr = %rdi, val = %rsi
 *
 * Atomically exchanges the 32-bit user word with val; returns the prior
 * value, or -1 via fusufault on an invalid address or fault.
 * NOTE(review): the ret before END is absent from this view.
 */
365 ENTRY(std_swapu32)
366 SMAP_OPEN
367 movq PCPU(curthread),%rcx
368 movq TD_PCB(%rcx), %rcx
369 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
370 movq %rsp,PCB_ONFAULT_SP(%rcx)
372 movq $VM_MAX_USER_ADDRESS-4,%rax
373 cmpq %rax,%rdi /* verify address is valid */
374 ja fusufault
376 movq %rsi,%rax /* old */
377 xchgl %eax,(%rdi) /* xchg with memory is implicitly locked */
/*
380 * The old value is in %rax. If the store succeeded it will be the
381 * value we expected (old) from before the store, otherwise it will
382 * be the current value.
 */
385 movq PCPU(curthread),%rcx
386 movq TD_PCB(%rcx), %rcx
387 movq $0,PCB_ONFAULT(%rcx) /* disarm */
388 SMAP_CLOSE
390 END(std_swapu32)
/*
 * std_fuwordadd32 - Atomically add to a 32-bit user word.
 * ptr = %rdi, qty = %rsi
 *
 * lock xaddl adds and returns the previous value in %eax; faults return
 * -1 via fusufault. NOTE(review): the ret before END is absent from this
 * view.
 */
392 ENTRY(std_fuwordadd32)
393 SMAP_OPEN
394 movq PCPU(curthread),%rcx
395 movq TD_PCB(%rcx), %rcx
396 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
397 movq %rsp,PCB_ONFAULT_SP(%rcx)
399 movq $VM_MAX_USER_ADDRESS-4,%rax
400 cmpq %rax,%rdi /* verify address is valid */
401 ja fusufault
403 movq %rsi,%rax /* qty to add */
404 lock xaddl %eax,(%rdi)
/*
407 * The old value is in %rax. If the store succeeded it will be the
408 * value we expected (old) from before the store, otherwise it will
409 * be the current value.
 */
411 movq PCPU(curthread),%rcx
412 movq TD_PCB(%rcx), %rcx
413 movq $0,PCB_ONFAULT(%rcx) /* disarm */
414 SMAP_CLOSE
416 END(std_fuwordadd32)
419 * casu64 - Compare and set user word. Returns -1 or the current value.
420 * dst = %rdi, old = %rsi, new = %rdx
/*
 * casu64 - Compare and set user word. Returns -1 or the current value.
 * dst = %rdi, old = %rsi, new = %rdx
 *
 * 64-bit sibling of casu32; validity bound is VM_MAX_USER_ADDRESS-8 for
 * an 8-byte access. NOTE(review): the ret before END is absent from this
 * view.
 */
422 ENTRY(casu64)
423 SMAP_OPEN
424 movq PCPU(curthread),%rcx
425 movq TD_PCB(%rcx), %rcx
426 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
427 movq %rsp,PCB_ONFAULT_SP(%rcx)
429 movq $VM_MAX_USER_ADDRESS-8,%rax
430 cmpq %rax,%rdi /* verify address is valid */
431 ja fusufault
433 movq %rsi,%rax /* old */
434 lock
435 cmpxchgq %rdx,(%rdi) /* new = %rdx */
/*
438 * The old value is in %rax. If the store succeeded it will be the
439 * value we expected (old) from before the store, otherwise it will
440 * be the current value.
 */
443 movq PCPU(curthread),%rcx
444 movq TD_PCB(%rcx), %rcx
445 movq $0,PCB_ONFAULT(%rcx) /* disarm */
446 SMAP_CLOSE
448 END(casu64)
451 * swapu64 - Swap long in user space. ptr = %rdi, val = %rsi
/*
 * swapu64 - Swap long in user space. ptr = %rdi, val = %rsi
 *
 * 64-bit sibling of std_swapu32: atomic exchange, returns the prior value
 * or -1 via fusufault. NOTE(review): the ret before END is absent from
 * this view.
 */
453 ENTRY(std_swapu64)
454 SMAP_OPEN
455 movq PCPU(curthread),%rcx
456 movq TD_PCB(%rcx), %rcx
457 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
458 movq %rsp,PCB_ONFAULT_SP(%rcx)
460 movq $VM_MAX_USER_ADDRESS-8,%rax
461 cmpq %rax,%rdi /* verify address is valid */
462 ja fusufault
464 movq %rsi,%rax /* old */
465 xchgq %rax,(%rdi) /* xchg with memory is implicitly locked */
/*
468 * The old value is in %rax. If the store succeeded it will be the
469 * value we expected (old) from before the store, otherwise it will
470 * be the current value.
 */
473 movq PCPU(curthread),%rcx
474 movq TD_PCB(%rcx), %rcx
475 movq $0,PCB_ONFAULT(%rcx) /* disarm */
476 SMAP_CLOSE
478 END(std_swapu64)
/*
 * std_fuwordadd64 - Atomically add to a 64-bit user word.
 * ptr = %rdi, val = %rsi
 *
 * lock xaddq adds and returns the previous value in %rax; faults return
 * -1 via fusufault. NOTE(review): the ret before END is absent from this
 * view.
 */
480 ENTRY(std_fuwordadd64)
481 SMAP_OPEN
482 movq PCPU(curthread),%rcx
483 movq TD_PCB(%rcx), %rcx
484 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
485 movq %rsp,PCB_ONFAULT_SP(%rcx)
487 movq $VM_MAX_USER_ADDRESS-8,%rax
488 cmpq %rax,%rdi /* verify address is valid */
489 ja fusufault
491 movq %rsi,%rax /* value to add */
492 lock xaddq %rax,(%rdi)
/*
495 * The old value is in %rax. If the store succeeded it will be the
496 * value we expected (old) from before the store, otherwise it will
497 * be the current value.
 */
500 movq PCPU(curthread),%rcx
501 movq TD_PCB(%rcx), %rcx
502 movq $0,PCB_ONFAULT(%rcx) /* disarm */
503 SMAP_CLOSE
505 END(std_fuwordadd64)
508 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
509 * byte from user memory. All these functions are MPSAFE.
510 * addr = %rdi
/*
 * uint64_t std_fuword64(addr:%rdi) - MPSAFE
 *
 * Fetch a 64-bit word from user memory; returns -1 via fusufault on a bad
 * address or fault. NOTE(review): the ret before END is absent from this
 * view.
 */
513 ENTRY(std_fuword64)
514 SMAP_OPEN
515 movq PCPU(curthread),%rcx
516 movq TD_PCB(%rcx), %rcx
517 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
518 movq %rsp,PCB_ONFAULT_SP(%rcx)
520 movq $VM_MAX_USER_ADDRESS-8,%rax
521 cmpq %rax,%rdi /* verify address is valid */
522 ja fusufault
524 movq (%rdi),%rax /* the protected load */
525 movq $0,PCB_ONFAULT(%rcx) /* disarm */
526 SMAP_CLOSE
528 END(std_fuword64)
/*
 * uint32_t std_fuword32(addr:%rdi) - MPSAFE
 *
 * Fetch a 32-bit word from user memory; returns -1 via fusufault on a bad
 * address or fault. NOTE(review): the ret before END is absent from this
 * view.
 */
530 ENTRY(std_fuword32)
531 SMAP_OPEN
532 movq PCPU(curthread),%rcx
533 movq TD_PCB(%rcx), %rcx
534 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
535 movq %rsp,PCB_ONFAULT_SP(%rcx)
537 movq $VM_MAX_USER_ADDRESS-4,%rax
538 cmpq %rax,%rdi /* verify address is valid */
539 ja fusufault
541 movl (%rdi),%eax /* protected load; zero-extends into %rax */
542 movq $0,PCB_ONFAULT(%rcx) /* disarm */
543 SMAP_CLOSE
545 END(std_fuword32)
/*
 * int std_fubyte(addr:%rdi) - MPSAFE
 *
 * Fetch one byte from user memory; returns -1 on a bad address or fault.
 * Also defines fusufault, the shared fault handler armed by the whole
 * casu/swapu/fuword/suword family: it disarms PCB_ONFAULT and returns -1.
 * NOTE(review): the ret instructions on both paths are absent from this
 * view — verify against the repository.
 */
547 ENTRY(std_fubyte)
548 SMAP_OPEN
549 movq PCPU(curthread),%rcx
550 movq TD_PCB(%rcx), %rcx
551 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
552 movq %rsp,PCB_ONFAULT_SP(%rcx)
554 movq $VM_MAX_USER_ADDRESS-1,%rax
555 cmpq %rax,%rdi
556 ja fusufault
558 movzbl (%rdi),%eax /* protected load, zero-extended */
559 movq $0,PCB_ONFAULT(%rcx) /* disarm */
560 SMAP_CLOSE
/* Shared fusu fault path: disarm handler and return -1 */
563 ALIGN_TEXT
564 fusufault:
565 movq PCPU(curthread),%rcx
566 xorl %eax,%eax
567 movq TD_PCB(%rcx), %rcx
568 movq %rax,PCB_ONFAULT(%rcx)
569 decq %rax /* %rax = -1 */
570 SMAP_CLOSE
572 END(std_fubyte)
575 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
576 * user memory. All these functions are MPSAFE.
578 * addr = %rdi, value = %rsi
580 * Write a long
/*
 * int std_suword64(addr:%rdi, value:%rsi) - MPSAFE
 *
 * Store a 64-bit word to user memory; returns 0 on success, -1 via
 * fusufault on a bad address or fault. NOTE(review): the ret before END
 * is absent from this view.
 */
582 ENTRY(std_suword64)
583 SMAP_OPEN
584 movq PCPU(curthread),%rcx
585 movq TD_PCB(%rcx), %rcx
586 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
587 movq %rsp,PCB_ONFAULT_SP(%rcx)
589 movq $VM_MAX_USER_ADDRESS-8,%rax
590 cmpq %rax,%rdi /* verify address validity */
591 ja fusufault
593 movq %rsi,(%rdi) /* the protected store */
594 xorl %eax,%eax /* return 0 */
595 movq PCPU(curthread),%rcx
596 movq TD_PCB(%rcx), %rcx
597 movq %rax,PCB_ONFAULT(%rcx) /* disarm */
598 SMAP_CLOSE
600 END(std_suword64)
603 * Write an int
/*
 * int std_suword32(addr:%rdi, value:%rsi) - MPSAFE
 *
 * Store a 32-bit word to user memory; returns 0 on success, -1 via
 * fusufault on a bad address or fault. NOTE(review): the ret before END
 * is absent from this view.
 */
605 ENTRY(std_suword32)
606 SMAP_OPEN
607 movq PCPU(curthread),%rcx
608 movq TD_PCB(%rcx), %rcx
609 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
610 movq %rsp,PCB_ONFAULT_SP(%rcx)
612 movq $VM_MAX_USER_ADDRESS-4,%rax
613 cmpq %rax,%rdi /* verify address validity */
614 ja fusufault
616 movl %esi,(%rdi) /* the protected store */
617 xorl %eax,%eax /* return 0 */
618 movq PCPU(curthread),%rcx
619 movq TD_PCB(%rcx), %rcx
620 movq %rax,PCB_ONFAULT(%rcx) /* disarm */
621 SMAP_CLOSE
623 END(std_suword32)
/*
 * int std_subyte(addr:%rdi, value:%rsi) - MPSAFE
 *
 * Store one byte to user memory; returns 0 on success, -1 via fusufault
 * on a bad address or fault. NOTE(review): the ret before END is absent
 * from this view.
 */
625 ENTRY(std_subyte)
626 SMAP_OPEN
627 movq PCPU(curthread),%rcx
628 movq TD_PCB(%rcx), %rcx
629 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault handler */
630 movq %rsp,PCB_ONFAULT_SP(%rcx)
632 movq $VM_MAX_USER_ADDRESS-1,%rax
633 cmpq %rax,%rdi /* verify address validity */
634 ja fusufault
636 movl %esi,%eax
637 movb %al,(%rdi) /* the protected store */
638 xorl %eax,%eax /* return 0 */
639 movq PCPU(curthread),%rcx /* restore trashed register */
640 movq TD_PCB(%rcx), %rcx
641 movq %rax,PCB_ONFAULT(%rcx) /* disarm */
642 SMAP_CLOSE
644 END(std_subyte)
647 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
648 * %rdi, %rsi, %rdx, %rcx
650 * copy a string from from to to, stop when a 0 character is reached.
651 * return ENAMETOOLONG if string is longer than maxlen, and
652 * EFAULT on protection violations. If lencopied is non-zero,
653 * return the actual length in *lencopied.
/*
 * int std_copyinstr(from:%rdi, to:%rsi, maxlen:%rdx, int *lencopied:%rcx)
 * - MP SAFE
 *
 * Copy a NUL-terminated string from user space, stopping at the NUL.
 * Returns 0 on success, ENAMETOOLONG if the string exceeds maxlen, or
 * EFAULT on a protection violation (via the cpystrflt handler). When
 * lencopied is non-NULL the actual copied length is stored through it.
 * NOTE(review): this view is missing the numeric loop/exit labels
 * (1:, 2:, 3:) referenced by the branches, and the final ret — verify
 * against the repository.
 */
655 ENTRY(std_copyinstr)
656 SMAP_OPEN
657 movq %rdx,%r8 /* %r8 = maxlen */
658 movq %rcx,%r9 /* %r9 = *len */
659 movq PCPU(curthread),%rcx
660 movq TD_PCB(%rcx), %rcx
661 movq $cpystrflt,PCB_ONFAULT(%rcx) /* arm fault handler */
662 movq %rsp,PCB_ONFAULT_SP(%rcx)
664 movq $VM_MAX_USER_ADDRESS,%rax
666 /* make sure 'from' is within bounds */
667 subq %rdi,%rax /* %rax = bytes between from and limit */
668 jbe cpystrflt
670 /* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
671 cmpq %rdx,%rax
672 jae 1f
673 movq %rax,%rdx /* clamp loop count */
674 movq %rax,%r8 /* clamp reported maxlen */
676 incq %rdx /* pre-bias: loop decrements first */
/* Copy loop: one byte per iteration until NUL or count exhausted */
679 decq %rdx
680 jz 3f /* count exhausted */
682 movb (%rdi),%al /* faster than lodsb+stosb */
683 movb %al,(%rsi)
684 leaq 1(%rdi),%rdi
685 leaq 1(%rsi),%rsi
686 testb %al,%al
687 jnz 2b /* keep going until NUL copied */
689 /* Success -- 0 byte reached */
690 decq %rdx /* don't count the NUL against maxlen math */
691 xorl %eax,%eax /* return 0 */
692 jmp cpystrflt_x
694 /* rdx is zero - return ENAMETOOLONG or EFAULT */
695 movq $VM_MAX_USER_ADDRESS,%rax
696 cmpq %rax,%rsi
697 jae cpystrflt /* ran off the end because of the clamp */
699 movq $ENAMETOOLONG,%rax
700 jmp cpystrflt_x
/* Fault path: return EFAULT */
702 cpystrflt:
703 movq $EFAULT,%rax
/* Common exit: disarm handler, optionally report copied length */
705 cpystrflt_x:
706 SMAP_CLOSE
707 /* set *lencopied and return %eax */
708 movq PCPU(curthread),%rcx
709 movq TD_PCB(%rcx), %rcx
710 movq $0,PCB_ONFAULT(%rcx)
712 testq %r9,%r9
713 jz 1f /* no lencopied pointer */
714 subq %rdx,%r8 /* copied = maxlen - remaining */
715 movq %r8,(%r9)
718 END(std_copyinstr)
721 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
722 * %rdi, %rsi, %rdx, %rcx
/*
 * int copystr(from:%rdi, to:%rsi, maxlen:%rdx, int *lencopied:%rcx)
 * - MP SAFE
 *
 * Kernel-to-kernel string copy: same loop as std_copyinstr but with no
 * user-address checks or fault handler. Returns 0 or ENAMETOOLONG and
 * optionally stores the copied length through lencopied.
 * NOTE(review): this view is missing the numeric labels (1:, 4:, 6:, 7:)
 * referenced by the branches, and the final ret — verify against the
 * repository.
 */
724 ENTRY(copystr)
725 movq %rdx,%r8 /* %r8 = maxlen */
726 incq %rdx /* pre-bias: loop decrements first */
/* Copy loop: one byte per iteration until NUL or count exhausted */
728 decq %rdx
729 jz 4f /* count exhausted -> ENAMETOOLONG */
731 movb (%rdi),%al /* faster than lodsb+stosb */
732 movb %al,(%rsi)
733 leaq 1(%rdi),%rdi
734 leaq 1(%rsi),%rsi
735 testb %al,%al
736 jnz 1b /* keep going until NUL copied */
738 /* Success -- 0 byte reached */
739 decq %rdx /* don't count the NUL */
740 xorl %eax,%eax /* return 0 */
741 jmp 6f
743 /* rdx is zero -- return ENAMETOOLONG */
744 movq $ENAMETOOLONG,%rax
/* Common exit: optionally report copied length */
747 testq %rcx,%rcx
748 jz 7f /* no lencopied pointer */
749 /* set *lencopied and return %rax */
750 subq %rdx,%r8 /* copied = maxlen - remaining */
751 movq %r8,(%rcx)
754 END(copystr)
757 * Handling of special x86_64 registers and descriptor tables etc
758 * %rdi
760 /* void lgdt(struct region_descriptor *rdp); */
/* void lgdt(struct region_descriptor *rdp); */
/*
 * Load a new GDT, then reload every segment register so the stale
 * descriptors are flushed: data/stack selectors via mov, and %cs by
 * rewriting the return frame into a far return (lretq).
 * NOTE(review): the 1: label targeted by 'jmp 1f' is absent from this
 * view — verify against the repository.
 */
761 ENTRY(lgdt)
762 /* reload the descriptor table */
763 lgdt (%rdi)
765 /* flush the prefetch q */
766 jmp 1f
769 movl $KDSEL,%eax /* kernel data selector */
770 movl %eax,%ds
771 movl %eax,%es
772 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
773 movl %eax,%gs /* Beware, use wrmsr to set 64 bit base */
774 movl %eax,%ss
776 /* reload code selector by turning return into intersegmental return */
777 popq %rax /* return address */
778 pushq $KCSEL /* kernel code selector */
779 pushq %rax
780 MEXITCOUNT
781 lretq /* far return reloads %cs */
782 END(lgdt)
784 /*****************************************************************************/
785 /* setjmp, longjmp */
786 /*****************************************************************************/
/*
 * int setjmp(jmp_buf:%rdi)
 *
 * Save the callee-saved registers, stack pointer, and return address into
 * the jmp_buf; returns 0 on the direct call (longjmp later returns 1 from
 * this same site). NOTE(review): the ret before END is absent from this
 * view.
 */
788 ENTRY(setjmp)
789 movq %rbx,0(%rdi) /* save rbx */
790 movq %rsp,8(%rdi) /* save rsp */
791 movq %rbp,16(%rdi) /* save rbp */
792 movq %r12,24(%rdi) /* save r12 */
793 movq %r13,32(%rdi) /* save r13 */
794 movq %r14,40(%rdi) /* save r14 */
795 movq %r15,48(%rdi) /* save r15 */
796 movq 0(%rsp),%rdx /* get rta */
797 movq %rdx,56(%rdi) /* save rip */
798 xorl %eax,%eax /* return(0); */
800 END(setjmp)
/*
 * void longjmp(jmp_buf:%rdi)
 *
 * Restore the context saved by setjmp and resume at the saved return
 * address, making the original setjmp call appear to return 1.
 * NOTE(review): the ret before END is absent from this view.
 */
802 ENTRY(longjmp)
803 movq 0(%rdi),%rbx /* restore rbx */
804 movq 8(%rdi),%rsp /* restore rsp */
805 movq 16(%rdi),%rbp /* restore rbp */
806 movq 24(%rdi),%r12 /* restore r12 */
807 movq 32(%rdi),%r13 /* restore r13 */
808 movq 40(%rdi),%r14 /* restore r14 */
809 movq 48(%rdi),%r15 /* restore r15 */
810 movq 56(%rdi),%rdx /* get rta */
811 movq %rdx,0(%rsp) /* put in return frame */
812 xorl %eax,%eax /* return(1); */
813 incl %eax
815 END(longjmp)
818 * Support for reading MSRs in the safe manner.
/*
 * int rdmsr_safe(u_int msr:%edi, uint64_t *data:%rsi)
 *
 * Read an MSR with fault protection (some MSRs #GP when unsupported);
 * faults vector to msr_onfault, which returns EFAULT. On success the
 * 64-bit value is stored through data and 0 is returned.
 * NOTE(review): the ret before END is absent from this view.
 */
820 ENTRY(rdmsr_safe)
821 /* int rdmsr_safe(u_int msr, uint64_t *data) */
822 movq PCPU(curthread),%r8
823 movq TD_PCB(%r8), %r8
824 movq $msr_onfault,PCB_ONFAULT(%r8) /* arm fault handler */
825 movq %rsp,PCB_ONFAULT_SP(%r8)
826 movl %edi,%ecx /* MSR number for rdmsr */
827 rdmsr /* Read MSR pointed by %ecx. Returns
828 hi byte in edx, lo in %eax */
829 salq $32,%rdx /* sign-shift %rdx left */
830 movl %eax,%eax /* zero-extend %eax -> %rax */
831 orq %rdx,%rax /* combine into 64-bit result */
832 movq %rax,(%rsi) /* *data = value */
833 xorq %rax,%rax /* return 0 */
834 movq %rax,PCB_ONFAULT(%r8) /* disarm */
836 END(rdmsr_safe)
839 * Support for writing MSRs in the safe manner.
/*
 * int wrmsr_safe(u_int msr:%edi, uint64_t data:%rsi)
 *
 * Write an MSR with fault protection; faults vector to msr_onfault,
 * which returns EFAULT. Returns 0 on success.
 * NOTE(review): the ret before END is absent from this view.
 */
841 ENTRY(wrmsr_safe)
842 /* int wrmsr_safe(u_int msr, uint64_t data) */
843 movq PCPU(curthread),%r8
844 movq TD_PCB(%r8), %r8
845 movq $msr_onfault,PCB_ONFAULT(%r8) /* arm fault handler */
846 movq %rsp,PCB_ONFAULT_SP(%r8)
847 movl %edi,%ecx /* MSR number for wrmsr */
848 movl %esi,%eax /* low 32 bits of data */
849 sarq $32,%rsi
850 movl %esi,%edx /* high 32 bits of data */
851 wrmsr /* Write MSR pointed by %ecx. Accepts
852 hi byte in edx, lo in %eax. */
853 xorq %rax,%rax /* return 0 */
854 movq %rax,PCB_ONFAULT(%r8) /* disarm */
856 END(wrmsr_safe)
859 * MSR operations fault handler
/*
 * MSR operations fault handler: shared by rdmsr_safe/wrmsr_safe.
 * Disarms PCB_ONFAULT and returns EFAULT.
 * NOTE(review): the ret after the movl is absent from this view — verify
 * against the repository.
 */
861 ALIGN_TEXT
862 msr_onfault:
863 movq PCPU(curthread),%r8
864 movq TD_PCB(%r8), %r8
865 movq $0,PCB_ONFAULT(%r8) /* disarm */
866 movl $EFAULT,%eax /* return EFAULT */
/*
 * smap_open(void) - callable wrapper around the SMAP_OPEN macro so C code
 * can enable kernel access to user pages.
 * NOTE(review): the ret before END is absent from this view.
 */
869 ENTRY(smap_open)
870 SMAP_OPEN
872 END(smap_open)
/*
 * smap_close(void) - callable wrapper around the SMAP_CLOSE macro to
 * re-enable SMAP protection after smap_open().
 * NOTE(review): the ret before END is absent from this view.
 */
874 ENTRY(smap_close)
875 SMAP_CLOSE
877 END(smap_close)