kernel - Fix serious issue w/ smp_invltlb(), plus other issues.
sys/cpu/i386/include/cpufunc.h
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/cpufunc.h,v 1.96.2.3 2002/04/28 22:50:54 dwmalone Exp $
 * $DragonFly: src/sys/cpu/i386/include/cpufunc.h,v 1.21 2007/04/27 23:23:59 dillon Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 */
#ifndef	_CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CDEFS_H_
#include <sys/cdefs.h>
#endif

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))

#ifdef	__GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

#ifdef SWTCH_OPTIM_STATS
extern	int	tlb_flush_count;	/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}
/*
 * Find the first 1 in mask, starting with bit 0, and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}

/*
 * Find the last 1 in mask, starting with bit 31, and return the
 * bit number.  If mask is 0 the result is undefined.
 */
static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %0,%0" : "=r" (result) : "0" (mask));
	return (result);
}

/*
 * Test and set the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btsl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btsl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return (result);
}

/*
 * Test and clear the specified bit (1 << bit) in the integer.  The
 * previous value of the bit is returned (0 or 1).
 */
static __inline int
btrl(u_int *mask, int bit)
{
	int result;

	__asm __volatile("btrl %2,%1; movl $0,%0; adcl $0,%0" :
		    "=r"(result), "=m"(*mask) : "r" (bit));
	return (result);
}
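
/*
 * Illustrative sketch (not part of the original header): the bit-scan
 * and bit test-and-modify primitives above compose into a tiny slot
 * allocator.  The names alloc_map/alloc_slot/free_slot are hypothetical.
 */
#if 0
static u_int alloc_map = 0xffffffff;	/* bit N set: slot N is free */

static int
alloc_slot(void)
{
	u_int bit;

	if (alloc_map == 0)
		return (-1);		/* nothing free */
	bit = bsfl(alloc_map);		/* lowest set bit (mask != 0 here) */
	btrl(&alloc_map, bit);		/* claim the slot */
	return ((int)bit);
}

static void
free_slot(int bit)
{
	btsl(&alloc_map, bit);		/* mark the slot free again */
}
#endif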
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			:  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			:  "0" (ax), "c" (cx));
}
#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif
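
/*
 * Illustrative sketch (not part of the original header): what the
 * interrupt intrinsics are for.  Real kernel code normally uses the
 * higher-level critical section API; event_count is hypothetical.
 */
#if 0
static u_int event_count;

static void
bump_event_count(void)
{
	cpu_disable_intr();	/* cli: no local interrupts ... */
	++event_count;
	cpu_enable_intr();	/* sti: ... until here */
}
#endif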
/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
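
/*
 * Illustrative sketch (not part of the original header): a one-slot
 * handoff between two cpus using the fences above.  The producer
 * orders the payload before the flag with cpu_sfence(); the consumer
 * orders the flag before the payload with cpu_lfence().  msg and
 * msg_ready are hypothetical.
 */
#if 0
static volatile u_int msg;
static volatile u_int msg_ready;

static void
producer(u_int value)
{
	msg = value;
	cpu_sfence();		/* payload visible before the flag */
	msg_ready = 1;
}

static u_int
consumer(void)
{
	while (msg_ready == 0)
		cpu_pause();	/* polite spin-wait */
	cpu_lfence();		/* flag read before the payload read */
	return (msg);
}
#endif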
#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#endif /* _KERNEL */
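
/*
 * Illustrative sketch (not part of the original header): fls() returns
 * 1 + the index of the highest set bit, which makes rounding up to a
 * power of two a one-liner.  Assumes size > 0 and no overflow.
 */
#if 0
static int
round_up_pow2(int size)
{
	if ((size & (size - 1)) == 0)
		return (size);		/* already a power of two */
	return (1 << fls(size));
}
#endif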
/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
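
/*
 * Illustrative sketch (not part of the original header): how the inb()
 * macro dispatches.  0x64 is the standard AT keyboard controller
 * status port, used here only as a familiar constant.
 */
#if 0
static void
inb_dispatch_example(u_int some_port)
{
	u_char status;

	status = inb(0x64);		/* constant < 0x100: inbc(), immediate port */
	status = inb(some_port);	/* variable: inbv(), port in %dx */
	(void)status;
}
#endif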
static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;

	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "=D" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port)
			 : "memory");
}
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif
#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	u_int	temp;
	/*
	 * This should be implemented as load_cr3(rcr3()) when load_cr3()
	 * is inlined.
	 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (temp)
			 : : "memory");
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif
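
/*
 * Illustrative sketch (not part of the original header): after a page
 * table entry changes, the stale translation must be flushed.  For a
 * single page cpu_invlpg() on the VA beats a full cpu_invltlb(), and
 * on SMP the other cpus are reached via smp_invltlb().  pte/newpte
 * are hypothetical.
 */
#if 0
static void
update_mapping(volatile u_int *pte, u_int newpte, void *va)
{
	*pte = newpte;
	cpu_invlpg(va);			/* drop the local stale entry */
#ifdef SMP
	smp_invltlb();			/* and everyone else's */
#endif
}
#endif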
static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}
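
/*
 * Illustrative sketch (not part of the original header): loadandclear()
 * atomically swaps in zero, making a lock-free "take all pending work"
 * handoff.  pending_mask is a hypothetical bitmask posted elsewhere
 * (e.g. by interrupt handlers using btsl()).
 */
#if 0
static volatile u_int pending_mask;

static void
drain_pending(void)
{
	u_int work;
	u_int bit;

	while ((work = loadandclear(&pending_mask)) != 0) {
		while (work != 0) {
			bit = bsfl(work);
			btrl(&work, bit);
			/* ... service work item 'bit' ... */
		}
	}
}
#endif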
static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}
static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "=S" (addr), "=c" (cnt)
			 : "0" (addr), "1" (cnt), "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline u_int
rcr2(void)
{
	u_int	data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline u_int
read_eflags(void)
{
	u_int	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x32" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x33" : "=A" (rv) : "c" (pmc));
	return (rv);
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
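
/*
 * Illustrative sketch (not part of the original header): cycle-counting
 * with rdtsc().  rdtsc is not serializing, so a serializing instruction
 * (cpuid, via do_cpuid()) is the classic way to fence short sections.
 */
#if 0
static u_int64_t
time_section(void (*fn)(void))
{
	u_int regs[4];
	u_int64_t t0, t1;

	do_cpuid(0, regs);		/* serialize before sampling */
	t0 = rdtsc();
	fn();
	do_cpuid(0, regs);		/* serialize again */
	t1 = rdtsc();
	return (t1 - t0);
}
#endif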
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile(".byte 0x0f, 0x30" : : "A" (newval), "c" (msr));
}
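
/*
 * Illustrative sketch (not part of the original header): the classic
 * save/restore of the interrupt flag with read_eflags()/write_eflags().
 * Restoring the saved %eflags puts IF (bit 9, PSL_I) back the way it was.
 */
#if 0
static void
with_intr_blocked(void (*fn)(void))
{
	u_int ef;

	ef = read_eflags();		/* remember the current IF state */
	cpu_disable_intr();
	fn();
	write_eflags(ef);		/* re-enables only if it was enabled */
}
#endif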
static __inline u_int
rfs(void)
{
	u_int sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rgs(void)
{
	u_int sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_fs(u_int sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_int sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline u_int
rdr0(void)
{
	u_int	data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int sel)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (sel));
}

static __inline u_int
rdr1(void)
{
	u_int	data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int sel)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (sel));
}

static __inline u_int
rdr2(void)
{
	u_int	data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int sel)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (sel));
}

static __inline u_int
rdr3(void)
{
	u_int	data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int sel)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (sel));
}

static __inline u_int
rdr4(void)
{
	u_int	data;
	__asm __volatile("movl %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int sel)
{
	__asm __volatile("movl %0,%%dr4" : : "r" (sel));
}

static __inline u_int
rdr5(void)
{
	u_int	data;
	__asm __volatile("movl %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int sel)
{
	__asm __volatile("movl %0,%%dr5" : : "r" (sel));
}

static __inline u_int
rdr6(void)
{
	u_int	data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int sel)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (sel));
}

static __inline u_int
rdr7(void)
{
	u_int	data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int sel)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (sel));
}
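
/*
 * Illustrative sketch (not part of the original header): arming a
 * hardware execution breakpoint in %dr0/%dr7.  The %dr7 encoding
 * (bit 0 = L0 local enable; R/W0 and LEN0 fields in bits 16-19 left
 * zero = break on instruction execution) follows the IA-32 manuals.
 */
#if 0
static void
set_exec_breakpoint(void *addr)
{
	load_dr0((u_int)addr);				/* linear address */
	load_dr7((rdr7() & ~0x000f0003) | 0x00000001);	/* enable dr0 */
}
#endif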
#else /* !__GNUC__ */

int	breakpoint	(void);
void	cpu_pause	(void);
u_int	bsfl		(u_int mask);
u_int	bsrl		(u_int mask);
void	cpu_disable_intr (void);
void	do_cpuid	(u_int ax, u_int *p);
void	cpu_enable_intr	(void);
u_char	inb		(u_int port);
u_int	inl		(u_int port);
void	insb		(u_int port, void *addr, size_t cnt);
void	insl		(u_int port, void *addr, size_t cnt);
void	insw		(u_int port, void *addr, size_t cnt);
void	invd		(void);
u_short	inw		(u_int port);
u_int	loadandclear	(u_int *addr);
void	outb		(u_int port, u_char data);
void	outl		(u_int port, u_int data);
void	outsb		(u_int port, void *addr, size_t cnt);
void	outsl		(u_int port, void *addr, size_t cnt);
void	outsw		(u_int port, void *addr, size_t cnt);
void	outw		(u_int port, u_short data);
u_int	rcr2		(void);
u_int64_t rdmsr		(u_int msr);
u_int64_t rdpmc		(u_int pmc);
u_int64_t rdtsc		(void);
u_int	read_eflags	(void);
void	wbinvd		(void);
void	write_eflags	(u_int ef);
void	wrmsr		(u_int msr, u_int64_t newval);
u_int	rfs		(void);
u_int	rgs		(void);
void	load_fs		(u_int sel);
void	load_gs		(u_int sel);

#endif	/* __GNUC__ */

void	load_cr0	(u_int cr0);
void	load_cr3	(u_int cr3);
void	load_cr4	(u_int cr4);
void	ltr		(u_short sel);
u_int	rcr0		(void);
u_int	rcr3		(void);
u_int	rcr4		(void);
void	reset_dbregs	(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */