/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
 * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.21 2007/04/27 23:23:59 dillon Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 */
#ifndef _CPU_CPUFUNC_H_
#define _CPU_CPUFUNC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CDEFS_H_
#include <sys/cdefs.h>
#endif
__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#ifdef __GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

#ifdef SWTCH_OPTIM_STATS
extern int tlb_flush_count;		/* XXX */
#endif
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}
/*
 * Find the first 1 in mask, starting with bit 0, and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}
/*
 * Find the last 1 in mask, starting with bit 31, and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}
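/*
 * Usage sketch for the bit-scan helpers above (a hypothetical example,
 * kept out of the build with #if 0).  Callers must test for a zero mask
 * themselves, since the result is undefined in that case.
 */
#if 0
static __inline void
bitscan_example(u_int mask)
{
	if (mask != 0) {
		u_int low  = bsfl(mask);	/* e.g. 0x00a0 -> 5 */
		u_int high = bsrl(mask);	/* e.g. 0x00a0 -> 7 */
	}
}
#endif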
/*
 * Test and set the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btsl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return(result);
}
/*
 * Test and clear the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btrl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return(result);
}
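/*
 * Hypothetical sketch: using btsl()/btrl() to claim and release a slot
 * in a small bitmap.  Note that these forms carry no lock prefix, so
 * they are only safe for bitmaps that are private to one cpu.
 */
#if 0
static __inline int
claim_slot(u_int *bitmap, int slot)
{
	/* 0: we claimed the slot, 1: it was already taken */
	return (btsl(bitmap, slot));
}

static __inline void
release_slot(u_int *bitmap, int slot)
{
	(void)btrl(bitmap, slot);
}
#endif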
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			:  "0" (ax));
}
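/*
 * Hypothetical sketch: reading the vendor string via cpuid leaf 0.
 * Per the constraints above, p[1], p[3] and p[2] receive %ebx, %edx and
 * %ecx, which concatenate to e.g. "GenuineIntel".  Assumes bcopy() is
 * in scope.
 */
#if 0
static __inline void
cpuid_vendor(char buf[13])
{
	u_int	regs[4];

	do_cpuid(0, regs);
	bcopy(&regs[1], buf + 0, 4);	/* %ebx */
	bcopy(&regs[3], buf + 4, 4);	/* %edx */
	bcopy(&regs[2], buf + 8, 4);	/* %ecx */
	buf[12] = '\0';
}
#endif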
#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif
/*
 * CPU and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}
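/*
 * Hypothetical sketch: a full fence is what makes a store-then-load
 * sequence (Dekker-style mutual exclusion) work; without it the cpu may
 * order the load of other_flag before the store to my_flag becomes
 * visible.  Variable names are illustrative only.
 */
#if 0
	my_flag = 1;
	cpu_mfence();		/* store visible before the load below */
	if (other_flag == 0) {
		/* ... we hold the critical section ... */
	}
#endif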
/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}
/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel CPUs, so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	__asm __volatile("" : : : "memory");
}
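/*
 * Hypothetical sketch of the classic pairing of the two one-way fences:
 * the producer orders its writes with cpu_sfence(), the consumer orders
 * its reads with cpu_lfence().  ring, slot, avail and msg are
 * illustrative names only.
 */
#if 0
	/* producer */
	ring->slot[i] = msg;
	cpu_sfence();			/* slot write ordered before... */
	ring->avail = i + 1;		/* ...the index update */

	/* consumer */
	while (ring->avail <= i)
		cpu_pause();
	cpu_lfence();			/* index read ordered before... */
	msg = ring->slot[i];		/* ...the slot read */
#endif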
/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
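/*
 * Hypothetical sketch: cpu_ccfence() in a polling loop forces the
 * compiler to re-load `pending' on every iteration, much like a
 * volatile access, without emitting any fence instruction.
 */
#if 0
	while (pending == 0) {
		cpu_ccfence();		/* compiler must re-read pending */
		cpu_pause();
	}
#endif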
#ifdef _KERNEL

#define HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}
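/*
 * Hypothetical sketch: unlike the raw bsfl()/bsrl() primitives, ffs()
 * and fls() use 1-based bit numbers and are well defined for 0.
 */
#if 0
	int first = ffs(0x0050);	/* 5: lowest set bit is bit 4 */
	int last  = fls(0x0050);	/* 7: highest set bit is bit 6 */
	int none  = ffs(0);		/* 0: no bit set */
#endif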
#endif /* _KERNEL */
/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
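/*
 * Hypothetical sketch: both forms go through the same macro, so reading
 * the PC keyboard controller status port works with a constant or a
 * variable port number.
 */
#if 0
	u_char	status;
	u_int	port = 0x64;

	status = inb(0x64);	/* constant port: expands to inbc() */
	status = inb(port);	/* variable port: expands to inbv() */
#endif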
static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;

	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}
#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
#else
#define smp_invltlb()
#endif
#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	u_int	temp;
	/*
	 * This should be implemented as load_cr3(rcr3()) when load_cr3()
	 * is inlined.
	 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp)
			 : : "memory");
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif
static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */
static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}
static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}
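/*
 * Hypothetical sketch: xchgl with a memory operand is implicitly
 * locked, so loadandclear() atomically hands a pending-work mask to
 * exactly one consumer.  event_mask and process_events() are
 * illustrative names only.
 */
#if 0
	u_int pending = loadandclear(&event_mask);

	if (pending)
		process_events(pending);
#endif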
static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}
static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}
static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}
static __inline u_int
rcr2(void)
{
	u_int	data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline u_int
read_eflags(void)
{
	u_int	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}
static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x32" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x33" : "=A" (rv) : "c" (pmc));
	return (rv);
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
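/*
 * Hypothetical sketch: cycle-counting a code section.  The timestamp
 * counter is per-cpu and runs at the core clock, so deltas are only
 * meaningful when both reads happen on the same cpu.  do_work() is an
 * illustrative name.
 */
#if 0
	u_int64_t t0, t1;

	t0 = rdtsc();
	do_work();
	t1 = rdtsc();
	/* t1 - t0 is the elapsed cycle count */
#endif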
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}
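/*
 * Hypothetical sketch: the classic save/disable/restore pattern for the
 * interrupt flag, built from read_eflags() and write_eflags().
 */
#if 0
	u_int ef = read_eflags();

	cpu_disable_intr();
	/* ... short critical section ... */
	write_eflags(ef);	/* restores the previous IF state */
#endif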
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile(".byte 0x0f, 0x30" : : "A" (newval), "c" (msr));
}
static __inline u_int
rfs(void)
{
	u_int sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rgs(void)
{
	u_int sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_fs(u_int sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_int sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
static __inline u_int
rdr0(void)
{
	u_int	data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int sel)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (sel));
}

static __inline u_int
rdr1(void)
{
	u_int	data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int sel)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (sel));
}

static __inline u_int
rdr2(void)
{
	u_int	data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int sel)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (sel));
}

static __inline u_int
rdr3(void)
{
	u_int	data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int sel)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (sel));
}

static __inline u_int
rdr4(void)
{
	u_int	data;
	__asm __volatile("movl %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int sel)
{
	__asm __volatile("movl %0,%%dr4" : : "r" (sel));
}

static __inline u_int
rdr5(void)
{
	u_int	data;
	__asm __volatile("movl %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int sel)
{
	__asm __volatile("movl %0,%%dr5" : : "r" (sel));
}

static __inline u_int
rdr6(void)
{
	u_int	data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int sel)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (sel));
}

static __inline u_int
rdr7(void)
{
	u_int	data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int sel)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (sel));
}
#else /* !__GNUC__ */

int	breakpoint	(void);
void	cpu_pause	(void);
u_int	bsfl		(u_int mask);
u_int	bsrl		(u_int mask);
void	cpu_disable_intr (void);
void	do_cpuid	(u_int ax, u_int *p);
void	cpu_enable_intr	(void);
u_char	inb		(u_int port);
u_int	inl		(u_int port);
void	insb		(u_int port, void *addr, size_t cnt);
void	insl		(u_int port, void *addr, size_t cnt);
void	insw		(u_int port, void *addr, size_t cnt);
void	invd		(void);
u_short	inw		(u_int port);
u_int	loadandclear	(u_int *addr);
void	outb		(u_int port, u_char data);
void	outl		(u_int port, u_int data);
void	outsb		(u_int port, void *addr, size_t cnt);
void	outsl		(u_int port, void *addr, size_t cnt);
void	outsw		(u_int port, void *addr, size_t cnt);
void	outw		(u_int port, u_short data);
u_int	rcr2		(void);
u_int64_t rdmsr		(u_int msr);
u_int64_t rdpmc		(u_int pmc);
u_int64_t rdtsc		(void);
u_int	read_eflags	(void);
void	wbinvd		(void);
void	write_eflags	(u_int ef);
void	wrmsr		(u_int msr, u_int64_t newval);
u_int	rfs		(void);
u_int	rgs		(void);
void	load_fs		(u_int sel);
void	load_gs		(u_int sel);

#endif	/* __GNUC__ */
void	load_cr0	(u_int cr0);
void	load_cr3	(u_int cr3);
void	load_cr4	(u_int cr4);
void	ltr		(u_short sel);
u_int	rcr0		(void);
u_int	rcr3		(void);
u_int	rcr4		(void);
void	reset_dbregs	(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */