/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */
/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define _CPU_CPUFUNC_H_
#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;
struct pmap;

__BEGIN_DECLS
#define readb(va)       (*(volatile u_int8_t *) (va))
#define readw(va)       (*(volatile u_int16_t *) (va))
#define readl(va)       (*(volatile u_int32_t *) (va))
#define readq(va)       (*(volatile u_int64_t *) (va))

#define writeb(va, d)   (*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)   (*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)   (*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)   (*(volatile u_int64_t *) (va) = (d))

#ifdef __GNUC__

#include <machine/lock.h>               /* XXX */
static __inline void
breakpoint(void)
{
        __asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
        __asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
        u_int   result;

        __asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
        return (result);
}

static __inline u_long
bsfq(u_long mask)
{
        u_long  result;

        __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
        return (result);
}

static __inline u_long
bsflong(u_long mask)
{
        u_long  result;

        __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
        return (result);
}

static __inline u_int
bsrl(u_int mask)
{
        u_int   result;

        __asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
        return (result);
}

static __inline u_long
bsrq(u_long mask)
{
        u_long  result;

        __asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
        return (result);
}
static __inline void
clflush(u_long addr)
{
        __asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
        __asm __volatile("cpuid"
                         : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
                         : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
        __asm __volatile("cpuid"
                         : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
                         : "0" (ax), "c" (cx));
}
#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
        __asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
        __asm __volatile("sti");
}

#endif
/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
        __asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
        __asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
        /*
         * NOTE:
         * Don't use 'sfence' here, as it will create a lot of
         * unnecessary stalls.
         */
        __asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
        __asm __volatile("" : : : "memory");
}
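/*
 * Illustrative sketch (not part of the original header): a typical use of
 * cpu_ccfence() is to keep the compiler from caching a shared flag in a
 * register across a polling loop.  'done' is a hypothetical flag set by
 * another cpu or by an interrupt handler.
 *
 *      extern int done;
 *
 *      while (done == 0) {
 *              cpu_pause();
 *              cpu_ccfence();          -- force 'done' to be re-read each pass
 *      }
 */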
/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before it
 * returns to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
        __asm __volatile("nop" : : : "memory");
}
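/*
 * Illustrative sketch (an assumption, not taken from this header): a deeply
 * recursive routine suspected of triggering the bug can be padded with the
 * workaround just before returning.  deep_recurse() and struct node are
 * made-up names.
 *
 *      static void
 *      deep_recurse(struct node *n)
 *      {
 *              if (n->left)
 *                      deep_recurse(n->left);
 *              if (n->right)
 *                      deep_recurse(n->right);
 *      #ifdef __AMDCPUBUG_DFLY01_AVAILABLE__
 *              cpu_amdcpubug_dfly01();
 *      #endif
 *      }
 */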
#ifdef _KERNEL

#define HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
        /*
         * Note that gcc-2's builtin ffs would be used if we didn't declare
         * this inline or turn off the builtin.  The builtin is faster but
         * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
         * versions.
         */
        return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
        /* Actually, the above is way out of date.  The builtins use cmov etc */
        return (__builtin_ffs(mask));
#endif
}

#define HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
        return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
        return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
        return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
        return (flsl((long)mask));
}

#endif /* _KERNEL */
static __inline void
halt(void)
{
        __asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define inb(port) __extension__ ({                                      \
        u_char  _data;                                                  \
        if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100    \
            && (port) < 0x10000)                                        \
                _data = inbc(port);                                     \
        else                                                            \
                _data = inbv(port);                                     \
        _data; })

#define outb(port, data) (                                              \
        __builtin_constant_p(port) && ((port) & 0xffff) < 0x100        \
        && (port) < 0x10000                                             \
        ? outbc(port, data) : outbv(port, data))
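/*
 * Illustrative sketch (not from the original header): a compile-time
 * constant port lets the macros pick inbc()/outbc(), which encode the port
 * as an immediate, while a run-time port falls back to inbv()/outbv()
 * through %dx.  Port 0x80 and the variable 'port' are hypothetical.
 *
 *      u_char  v1, v2;
 *      u_int   port = 0x80;
 *
 *      v1 = inb(0x80);         -- constant, expands to inbc(0x80)
 *      v2 = inb(port);         -- variable, expands to inbv(port)
 */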
static __inline u_char
inbc(u_int port)
{
        u_char  data;

        __asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
        return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
        __asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
        u_char  data;

        /*
         * We use %%dx and not %1 here because i/o is done at %dx and not at
         * %edx, while gcc generates inferior code (movw instead of movl)
         * if we tell it to load (u_short) port.
         */
        __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

static __inline u_int
inl(u_int port)
{
        u_int   data;

        __asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}
static __inline void
insb(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insb"
                         : "+D" (addr), "+c" (cnt)
                         : "d" (port)
                         : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insw"
                         : "+D" (addr), "+c" (cnt)
                         : "d" (port)
                         : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; insl"
                         : "+D" (addr), "+c" (cnt)
                         : "d" (port)
                         : "memory");
}

static __inline void
invd(void)
{
        __asm __volatile("invd");
}
#if defined(_KERNEL)

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
        __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
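/*
 * Illustrative sketch (an assumption about typical use, not from this
 * header): after changing a page table entry for 'va', the stale TLB entry
 * on the local cpu is flushed with cpu_invlpg(); other cpus go through the
 * smp invalidation machinery declared elsewhere.
 *
 *      void *va;                       -- hypothetical, mapping just changed
 *
 *      cpu_invlpg(va);                 -- local cpu only
 */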
static __inline void
cpu_nop(void)
{
        __asm __volatile("rep; nop");
}

#endif  /* _KERNEL */

static __inline u_short
inw(u_int port)
{
        u_short data;

        __asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
        u_int   result;

        __asm __volatile("xorl %0,%0; xchgl %1,%0"
                         : "=&r" (result) : "m" (*addr));
        return (result);
}
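/*
 * Illustrative sketch (hypothetical use, not from this header): because
 * xchg is implicitly locked, loadandclear() atomically takes ownership of
 * a pending-work mask so each set bit is serviced exactly once.
 * 'pending_mask' and service_one() are made-up names.
 *
 *      u_int pending = loadandclear(&pending_mask);
 *
 *      while (pending) {
 *              u_int bit = bsfl(pending);
 *              pending &= ~(1U << bit);
 *              service_one(bit);
 *      }
 */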
static __inline void
outbv(u_int port, u_char data)
{
        u_char  al;
        /*
         * Use an unnecessary assignment to help gcc's register allocator.
         * This makes a large difference for gcc-1.40 and a tiny difference
         * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
         * best results.  gcc-2.6.0 can't handle this.
         */
        al = data;
        __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
        /*
         * outl() and outw() aren't used much so we haven't looked at
         * possible micro-optimizations such as the unnecessary
         * assignment for them.
         */
        __asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsb"
                         : "+S" (addr), "+c" (cnt)
                         : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsw"
                         : "+S" (addr), "+c" (cnt)
                         : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
        __asm __volatile("cld; rep; outsl"
                         : "+S" (addr), "+c" (cnt)
                         : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
        __asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
        __asm __volatile("pause");
}
static __inline u_long
read_rflags(void)
{
        u_long  rf;

        __asm __volatile("pushfq; popq %0" : "=r" (rf));
        return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
        u_int32_t low, high;

        __asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
        u_int32_t low, high;

        __asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
        return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
        u_int32_t low, high;

        __asm __volatile("rdtsc" : "=a" (low), "=d" (high));
        return (low | ((u_int64_t)high << 32));
}
#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline u_int64_t
rdtsc_ordered(void)
{
        if (cpu_vendor_id == CPU_VENDOR_INTEL)
                cpu_lfence();
        else
                cpu_mfence();
        return rdtsc();
}
#endif
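/*
 * Illustrative sketch (hypothetical use, not from this header): the fence
 * in rdtsc_ordered() keeps the TSC read from being reordered around the
 * work being timed, so back-to-back reads bracket the region correctly.
 * do_timed_work() is a made-up function.
 *
 *      u_int64_t t0, t1, cycles;
 *
 *      t0 = rdtsc_ordered();
 *      do_timed_work();
 *      t1 = rdtsc_ordered();
 *      cycles = t1 - t0;
 */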
static __inline void
wbinvd(void)
{
        __asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
        lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
        __asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
        u_int32_t low, high;

        low = newval;
        high = newval >> 32;
        __asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
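/*
 * Illustrative sketch (hypothetical use, not from this header): rdmsr()
 * and wrmsr() compose into a read-modify-write of a single MSR.  The MSR
 * name and the bit set below are made up for the example.
 *
 *      u_int64_t val;
 *
 *      val = rdmsr(MSR_EXAMPLE);       -- hypothetical MSR constant
 *      val |= 0x1;                     -- set a feature-enable bit
 *      wrmsr(MSR_EXAMPLE, val);
 */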
static __inline void
xsetbv(u_int ecx, u_int eax, u_int edx)
{
        __asm __volatile(".byte 0x0f,0x01,0xd1"
                         :
                         : "a" (eax), "c" (ecx), "d" (edx));
}

static __inline void
load_cr0(u_long data)
{
        __asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
        u_long  data;

        __asm __volatile("movq %%cr0,%0" : "=r" (data));
        return (data);
}

static __inline u_long
rcr2(void)
{
        u_long  data;

        __asm __volatile("movq %%cr2,%0" : "=r" (data));
        return (data);
}

static __inline void
load_cr3(u_long data)
{
        __asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
        u_long  data;

        __asm __volatile("movq %%cr3,%0" : "=r" (data));
        return (data);
}

static __inline void
load_cr4(u_long data)
{
        __asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
        u_long  data;

        __asm __volatile("movq %%cr4,%0" : "=r" (data));
        return (data);
}
#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
        load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
        ++tlb_flush_count;
#endif
}

#endif

extern void smp_invltlb(void);
static __inline u_short
rfs(void)
{
        u_short sel;
        __asm __volatile("movw %%fs,%0" : "=rm" (sel));
        return (sel);
}

static __inline u_short
rgs(void)
{
        u_short sel;
        __asm __volatile("movw %%gs,%0" : "=rm" (sel));
        return (sel);
}

static __inline void
load_ds(u_short sel)
{
        __asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
        __asm __volatile("movw %0,%%es" : : "rm" (sel));
}
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define MSR_FSBASE      0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
        /* Preserve the fsbase value across the selector load */
        __asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
                         : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define MSR_GSBASE      0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
        /*
         * Preserve the gsbase value across the selector load.
         * Note that we have to disable interrupts because the gsbase
         * being trashed happens to be the kernel gsbase at the time.
         */
        __asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
                         : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
        __asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
        __asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
        __asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
        __asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
        __asm __volatile("ltr %0" : : "r" (sel));
}
static __inline u_int64_t
rdr0(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr0,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
        __asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr1,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
        __asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr2,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
        __asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr3,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
        __asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr4,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
        __asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr5,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
        __asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr6,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
        __asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
        u_int64_t data;
        __asm __volatile("movq %%dr7,%0" : "=r" (data));
        return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
        __asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
static __inline register_t
intr_disable(void)
{
        register_t rflags;

        rflags = read_rflags();
        cpu_disable_intr();
        return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
        write_rflags(rflags);
}
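/*
 * Illustrative sketch (hypothetical use, not from this header): the usual
 * pairing saves rflags, disables interrupts for a short critical section,
 * then restores whatever interrupt state the caller had.
 *
 *      register_t rflags;
 *
 *      rflags = intr_disable();
 *      ... touch state that an interrupt handler also touches ...
 *      intr_restore(rflags);
 */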
#else /* !__GNUC__ */

int     breakpoint(void);
void    cpu_pause(void);
u_int   bsfl(u_int mask);
u_int   bsrl(u_int mask);
void    cpu_disable_intr(void);
void    cpu_enable_intr(void);
void    cpu_invlpg(u_long addr);
void    cpu_invlpg_range(u_long start, u_long end);
void    do_cpuid(u_int ax, u_int *p);
void    halt(void);
u_char  inb(u_int port);
u_int   inl(u_int port);
void    insb(u_int port, void *addr, size_t cnt);
void    insl(u_int port, void *addr, size_t cnt);
void    insw(u_int port, void *addr, size_t cnt);
void    invd(void);
void    invlpg_range(u_int start, u_int end);
void    cpu_invltlb(void);
u_short inw(u_int port);
void    load_cr0(u_int cr0);
void    load_cr3(u_int cr3);
void    load_cr4(u_int cr4);
void    load_fs(u_int sel);
void    load_gs(u_int sel);
struct region_descriptor;
void    lidt(struct region_descriptor *addr);
void    lldt(u_short sel);
void    ltr(u_short sel);
void    outb(u_int port, u_char data);
void    outl(u_int port, u_int data);
void    outsb(u_int port, void *addr, size_t cnt);
void    outsl(u_int port, void *addr, size_t cnt);
void    outsw(u_int port, void *addr, size_t cnt);
void    outw(u_int port, u_short data);
void    ia32_pause(void);
u_int   rcr0(void);
u_int   rcr2(void);
u_int   rcr3(void);
u_int   rcr4(void);
u_short rfs(void);
u_short rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_int   read_rflags(void);
void    wbinvd(void);
void    write_rflags(u_int rf);
void    wrmsr(u_int msr, u_int64_t newval);
u_int64_t rdr0(void);
void    load_dr0(u_int64_t dr0);
u_int64_t rdr1(void);
void    load_dr1(u_int64_t dr1);
u_int64_t rdr2(void);
void    load_dr2(u_int64_t dr2);
u_int64_t rdr3(void);
void    load_dr3(u_int64_t dr3);
u_int64_t rdr4(void);
void    load_dr4(u_int64_t dr4);
u_int64_t rdr5(void);
void    load_dr5(u_int64_t dr5);
u_int64_t rdr6(void);
void    load_dr6(u_int64_t dr6);
u_int64_t rdr7(void);
void    load_dr7(u_int64_t dr7);
register_t intr_disable(void);
void    intr_restore(register_t rf);

#endif /* __GNUC__ */

int     rdmsr_safe(u_int msr, uint64_t *val);
int     wrmsr_safe(u_int msr, uint64_t newval);
void    reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */