/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/clock.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;
struct pmap;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

struct trapframe;

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 : "0" (ax), "c" (cx));
}
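
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * retrieving the 12-byte CPU vendor string with do_cpuid().  Leaf 0 returns
 * the string in EBX, EDX, ECX; the local names below are hypothetical.
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	bcopy(&regs[1], vendor + 0, 4);		// EBX
 *	bcopy(&regs[3], vendor + 4, 4);		// EDX
 *	bcopy(&regs[2], vendor + 8, 4);		// ECX
 *	vendor[12] = '\0';
 */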

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
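
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a hypothetical producer/consumer handoff using the fences above.  The
 * variables "shared_data" and "ready" are assumptions for the example.
 *
 *	// producer
 *	shared_data = compute();
 *	cpu_sfence();			// data visible before the flag
 *	ready = 1;
 *
 *	// consumer
 *	while (ready == 0)
 *		cpu_pause();
 *	cpu_lfence();			// flag read ordered before data reads
 *	use(shared_data);
 *
 * cpu_ccfence() alone is sufficient when only compiler reordering, not
 * cpu reordering, must be suppressed (e.g. around a polled memory flag).
 */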

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before it
 * returns to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core built around an Opteron 6168 (Id = 0x100f91 Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
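
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * with a constant port below 0x100 the macros resolve to the inbc()/outbc()
 * variants, otherwise to inbv()/outbv().  The port numbers shown (0x64 is
 * the keyboard-controller status port, 0x70 the CMOS index port) and the
 * variable "port" are assumptions for the example.
 *
 *	u_char stat = inb(0x64);	// constant port < 0x100 -> inbc()
 *	outb(0x70, 0x0a);		// constant port < 0x100 -> outbc()
 *
 *	u_int port = probe_port();	// hypothetical, not a compile-time constant
 *	u_char v = inb(port);		// non-constant port -> inbv()
 */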

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
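
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * after modifying a page table entry on this cpu, the stale translation
 * for just that page can be discarded.  "pte" and "va" are hypothetical.
 *
 *	*pte = new_pte_value;
 *	cpu_invlpg((void *)va);		// this cpu only; other cpus need an
 *					// IPI-based shootdown (see smp_invltlb())
 */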

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			 : "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline tsc_uclock_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((tsc_uclock_t)high << 32));
}

#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline tsc_uclock_t
rdtsc_ordered(void)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		cpu_lfence();
	else
		cpu_mfence();
	return rdtsc();
}
#endif
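
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * bracketing a short code sequence with rdtsc_ordered() to obtain an
 * elapsed cycle count.  "do_work()" is a hypothetical stand-in.
 *
 *	tsc_uclock_t t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_work();
 *	t1 = rdtsc_ordered();
 *	kprintf("elapsed %lu tsc ticks\n", (u_long)(t1 - t0));
 */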

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void	cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr"
			 :
			 : "a" (low), "d" (high), "c" (msr)
			 : "memory");
}
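
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a typical MSR read-modify-write using rdmsr()/wrmsr().  "msr" and
 * "ENABLE_BIT" are hypothetical; real callers pass a specific MSR number,
 * usually from <machine/specialreg.h>.
 *
 *	u_int64_t val;
 *
 *	val = rdmsr(msr);
 *	val |= ENABLE_BIT;
 *	wrmsr(msr, val);
 *
 * rdmsr_safe()/wrmsr_safe(), declared at the end of this file, are the
 * fault-tolerant variants for MSRs that may not exist on a given cpu.
 */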

static __inline void
load_xcr(u_int xcr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;

	__asm __volatile("xsetbv"
			 :
			 : "a" (low), "d" (high), "c" (xcr)
			 : "memory");
}

static __inline uint64_t
rxcr(u_int xcr)
{
	uint32_t low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (xcr));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data) : "memory");
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr2(u_long data)
{
	__asm __volatile("movq %0,%%cr2" : : "r" (data) : "memory");
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data) : "memory");
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

void	smp_invltlb(void);
void	smp_sniff(void);
void	cpu_sniff(int);
void	hard_sniff(struct trapframe *);

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
			 : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
			 : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0) : "memory");
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1) : "memory");
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2) : "memory");
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3) : "memory");
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4) : "memory");
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5) : "memory");
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6) : "memory");
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7) : "memory");
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr2(u_int cr2);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
tsc_uclock_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t intr_disable(void);
void	intr_restore(register_t rf);

#endif /* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
void	reset_dbregs(void);
void	smap_open(void);
void	smap_close(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */