hyperv/vmbus: Complete vmbus initialization; interrupt cputimer is enabled
[dragonfly.git] / sys / platform / pc64 / x86_64 / support.s
blob0d890e38b05ffe8a6c5f8c95ce71cd67af9d5f9a
1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * Copyright (c) 2003 Peter Wemm.
4 * Copyright (c) 2008 The DragonFly Project.
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
31 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
34 #include <machine/asmacros.h>
35 #include <machine/pmap.h>
37 #include "assym.s"
39 ALIGN_DATA
41 .text
44 * bcopy family
45 * void bzero(void *buf, size_t len)
48 /* done */
/*
 * void bzero(void *buf, size_t len)
 * SysV AMD64: %rdi = buf, %rsi = len.
 * Zeroes len bytes: qword-at-a-time with rep stosq, then the remaining
 * len & 7 tail bytes with rep stosb.  %rsi is left intact by stosq and
 * reused to derive the tail count.
 */
49 ENTRY(bzero)
50 movq %rsi,%rcx /* %rcx = len */
51 xorl %eax,%eax /* fill pattern = 0 (also zeroes %rax) */
52 shrq $3,%rcx /* qword count = len / 8 */
53 cld
54 rep
55 stosq
56 movq %rsi,%rcx
57 andq $7,%rcx /* 0..7 trailing bytes */
58 rep
59 stosb
60 ret
62 /* Address: %rdi */
/*
 * pagezero(%rdi = page)
 * Zero one page, 32 bytes per loop iteration.  %rdx runs from
 * -PAGE_SIZE up to 0 while %rdi is biased one page past the start,
 * so (%rdi,%rdx) walks the page forward; loop ends when %rdx hits 0.
 * NOTE(review): the '1:' loop label targeted by 'jne 1b' is not
 * visible in this extraction — presumably it precedes the first store.
 */
63 ENTRY(pagezero)
64 movq $-PAGE_SIZE,%rdx
65 subq %rdx,%rdi /* %rdi = page + PAGE_SIZE */
66 xorl %eax,%eax /* zero fill */
68 movq %rax,(%rdi,%rdx) /* movnti */
69 movq %rax,8(%rdi,%rdx) /* movnti */
70 movq %rax,16(%rdi,%rdx) /* movnti */
71 movq %rax,24(%rdi,%rdx) /* movnti */
72 addq $32,%rdx
73 jne 1b
74 /*sfence*/
75 ret
/*
 * bcmp(b1, b2, len) — %rdi, %rsi, %rdx.
 * Compare qword-wise then byte-wise; returns 0 in %eax if equal,
 * 1 if different (setne on ZF from the last compare).
 * NOTE(review): the '1:' label targeted by 'jne 1f' is not visible in
 * this extraction — presumably it sits just before the setne.
 */
77 ENTRY(bcmp)
78 movq %rdx,%rcx
79 shrq $3,%rcx /* qword count */
80 cld /* compare forwards */
81 repe
82 cmpsq
83 jne 1f /* mismatch found in qword pass */
85 movq %rdx,%rcx
86 andq $7,%rcx /* 0..7 trailing bytes */
87 repe
88 cmpsb
90 setne %al /* 0 = equal, 1 = different */
91 movsbl %al,%eax
92 ret
95 * bcopy(src, dst, cnt)
96 * rdi, rsi, rdx
97 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * bcopy(src, dst, cnt) — %rdi = src, %rsi = dst, %rdx = cnt.
 * Overlap-safe copy.  The xchg swaps the args into the string-op
 * convention (%rsi = src, %rdi = dst).  If dst - src < cnt (unsigned),
 * the regions overlap with src below dst and the copy must run
 * backwards; otherwise copy forwards qword-wise plus a byte tail.
 * NOTE(review): the '1:'/'2:' labels, the 'std' needed before the
 * backward string ops, the trailing 'cld' and the final 'ret' are not
 * visible in this extraction — dropped lines, presumably.
 */
99 ENTRY(generic_bcopy) /* generic_bcopy is bcopy without FPU */
100 ENTRY(ovbcopy) /* our bcopy doesn't use the FPU, so ovbcopy is the same */
101 ENTRY(bcopy)
102 xchgq %rsi,%rdi /* %rsi = src, %rdi = dst */
103 movq %rdx,%rcx
105 movq %rdi,%rax
106 subq %rsi,%rax /* %rax = dst - src */
107 cmpq %rcx,%rax /* overlapping && src < dst? */
108 jb 1f
110 shrq $3,%rcx /* copy by 64-bit words */
111 cld /* nope, copy forwards */
113 movsq
114 movq %rdx,%rcx
115 andq $7,%rcx /* any bytes left? */
117 movsb
120 /* ALIGN_TEXT */
122 addq %rcx,%rdi /* copy backwards */
123 addq %rcx,%rsi
124 decq %rdi /* point at last byte of each region */
125 decq %rsi
126 andq $7,%rcx /* any fractional bytes? */
129 movsb
130 movq %rdx,%rcx /* copy remainder by 32-bit words */
131 shrq $3,%rcx
132 subq $7,%rsi /* step back to last full qword */
133 subq $7,%rdi
135 movsq
/*
 * reset_dbregs() — clear all hardware debug registers.
 * NOTE(review): $0x200 is DR7 bit 9 (GE), yet the comment claims the
 * manual requires bit 10 (which would be $0x400) — confirm against the
 * Intel SDM DR7 layout.  The final store of %rax back to %dr7 and the
 * 'ret' are not visible in this extraction.
 */
138 ENTRY(reset_dbregs)
139 movq $0x200,%rax /* the manual says that bit 10 must be set to 1 */
140 movq %rax,%dr7 /* disable all breakpoints first */
141 movq $0,%rax
142 movq %rax,%dr0 /* clear address breakpoints 0-3 */
143 movq %rax,%dr1
144 movq %rax,%dr2
145 movq %rax,%dr3
146 movq %rax,%dr6 /* clear debug status */
150 * Note: memcpy does not support overlapping copies
/*
 * memcpy(dst, src, cnt) — %rdi, %rsi, %rdx.  Forward-only copy;
 * does NOT support overlapping regions (see bcopy for that).
 * Qword-wise rep movsq plus a 0..7 byte tail.
 * NOTE(review): the 'rep' prefixes and final 'ret' are not visible in
 * this extraction — dropped lines, presumably.
 */
152 ENTRY(memcpy)
153 movq %rdx,%rcx
154 shrq $3,%rcx /* copy by 64-bit words */
155 cld /* copy forwards */
157 movsq
158 movq %rdx,%rcx
159 andq $7,%rcx /* any bytes left? */
161 movsb
165 * pagecopy(%rdi=from, %rsi=to)
/*
 * pagecopy(%rdi = from, %rsi = to)
 * Copy one page, 32 bytes per iteration, using the same negative-index
 * technique as pagezero: both pointers are biased one page forward and
 * %rdx counts from -PAGE_SIZE up to 0.  The prefetchnta pre-loop and
 * the movnti non-temporal stores are commented out.
 * NOTE(review): the '1:'/'2:' labels and final 'ret' are not visible
 * in this extraction.
 */
167 ENTRY(pagecopy)
168 movq $-PAGE_SIZE,%rax
169 movq %rax,%rdx /* %rdx = -PAGE_SIZE loop index */
170 subq %rax,%rdi /* bias both pointers past page end */
171 subq %rax,%rsi
173 /*prefetchnta (%rdi,%rax)*/
174 /*addq $64,%rax*/
175 /*jne 1b*/
177 movq (%rdi,%rdx),%rax
178 movq %rax,(%rsi,%rdx) /* movnti */
179 movq 8(%rdi,%rdx),%rax
180 movq %rax,8(%rsi,%rdx) /* movnti */
181 movq 16(%rdi,%rdx),%rax
182 movq %rax,16(%rsi,%rdx) /* movnti */
183 movq 24(%rdi,%rdx),%rax
184 movq %rax,24(%rsi,%rdx) /* movnti */
185 addq $32,%rdx
186 jne 2b
187 /*sfence*/
190 /* fillw(pat, base, cnt) */
191 /* %rdi,%rsi, %rdx */
/*
 * Fill cnt 16-bit words at base with pattern pat (low 16 bits of %rdi).
 * NOTE(review): the 'cld'/'rep' before stosw and the final 'ret' are
 * not visible in this extraction — dropped lines, presumably.
 */
192 ENTRY(fillw)
193 movq %rdi,%rax /* %ax = fill pattern */
194 movq %rsi,%rdi /* %rdi = destination for stosw */
195 movq %rdx,%rcx /* %rcx = word count */
198 stosw
201 /*****************************************************************************/
202 /* copyout and fubyte family */
203 /*****************************************************************************/
205 * Access user memory from inside the kernel. These routines should be
206 * the only places that do this.
208 * These routines set curpcb->onfault for the time they execute. When a
209 * protection violation occurs inside the functions, the trap handler
210 * returns to *curpcb->onfault instead of the function.
214 * std_copyout(from_kernel, to_user, len) - MP SAFE
215 * %rdi, %rsi, %rdx
/*
 * std_copyout(from_kernel, to_user, len) — %rdi, %rsi, %rdx.
 * Copy len bytes from kernel to user space.  Arms curpcb->onfault so a
 * fault during the copy vectors to copyout_fault, which returns EFAULT.
 * Validates the user range (no wrap, below VM_MAX_USER_ADDRESS) before
 * touching it.  Returns 0 on success.
 * NOTE(review): the 'rep' prefixes and the 'ret' after done_copyout and
 * copyout_fault are not visible in this extraction.
 */
217 ENTRY(std_copyout)
218 movq PCPU(curthread),%rax
219 movq TD_PCB(%rax), %rax /* %rax = curthread->td_pcb */
220 movq $copyout_fault,PCB_ONFAULT(%rax) /* arm fault recovery */
221 movq %rsp,PCB_ONFAULT_SP(%rax)
222 testq %rdx,%rdx /* anything to do? */
223 jz done_copyout
/*
 * Reject kernel addresses: first make sure from+len does not wrap,
 * then bound the end address by VM_MAX_USER_ADDRESS.
 */
235 movq %rsi,%rax
236 addq %rdx,%rax /* %rax = end of user destination */
237 jc copyout_fault /* address wrapped */
244 movq $VM_MAX_USER_ADDRESS,%rcx
245 cmpq %rcx,%rax
246 ja copyout_fault /* destination reaches kernel space */
248 xchgq %rdi,%rsi
249 /* bcopy(%rsi, %rdi, %rdx) */
250 movq %rdx,%rcx
252 shrq $3,%rcx /* qword-wise copy */
255 movsq
256 movb %dl,%cl
257 andb $7,%cl /* 0..7 byte tail */
259 movsb
261 done_copyout:
262 xorl %eax,%eax /* return 0 */
263 movq PCPU(curthread),%rdx
264 movq TD_PCB(%rdx), %rdx
265 movq %rax,PCB_ONFAULT(%rdx) /* disarm fault recovery */
268 ALIGN_TEXT
269 copyout_fault:
270 movq PCPU(curthread),%rdx
271 movq TD_PCB(%rdx), %rdx
272 movq $0,PCB_ONFAULT(%rdx) /* disarm fault recovery */
273 movq $EFAULT,%rax
277 * std_copyin(from_user, to_kernel, len) - MP SAFE
278 * %rdi, %rsi, %rdx
/*
 * std_copyin(from_user, to_kernel, len) — %rdi, %rsi, %rdx.
 * Mirror of std_copyout: validates the user source range, arms
 * curpcb->onfault so a fault vectors to copyin_fault (EFAULT), copies
 * qword-wise plus byte tail, returns 0 on success.
 * NOTE(review): the 'rep' prefixes and the 'ret' after done_copyin and
 * copyin_fault are not visible in this extraction.
 */
280 ENTRY(std_copyin)
281 movq PCPU(curthread),%rax
282 movq TD_PCB(%rax), %rax
283 movq $copyin_fault,PCB_ONFAULT(%rax) /* arm fault recovery */
284 movq %rsp,PCB_ONFAULT_SP(%rax)
285 testq %rdx,%rdx /* anything to do? */
286 jz done_copyin
/* user source must not wrap and must end below VM_MAX_USER_ADDRESS */
291 movq %rdi,%rax
292 addq %rdx,%rax
293 jc copyin_fault
294 movq $VM_MAX_USER_ADDRESS,%rcx
295 cmpq %rcx,%rax
296 ja copyin_fault
298 xchgq %rdi,%rsi
299 movq %rdx,%rcx
300 movb %cl,%al /* stash low len bits for byte tail */
301 shrq $3,%rcx /* copy longword-wise */
304 movsq
305 movb %al,%cl
306 andb $7,%cl /* copy remaining bytes */
308 movsb
310 done_copyin:
311 xorl %eax,%eax /* return 0 */
312 movq PCPU(curthread),%rdx
313 movq TD_PCB(%rdx), %rdx
314 movq %rax,PCB_ONFAULT(%rdx) /* disarm fault recovery */
317 ALIGN_TEXT
318 copyin_fault:
319 movq PCPU(curthread),%rdx
320 movq TD_PCB(%rdx), %rdx
321 movq $0,PCB_ONFAULT(%rdx)
322 movq $EFAULT,%rax
326 * casuword32. Compare and set user integer. Returns -1 or the current value.
327 * dst = %rdi, old = %rsi, new = %rdx
/*
 * casuword32(dst, old, new) — %rdi, %rsi(32), %rdx(32).
 * Atomic compare-and-set of a 32-bit user word.  Returns the value
 * observed at dst (== old iff the store happened) in %eax, or -1 via
 * fusufault on a bad address/fault.
 * NOTE(review): the final 'ret' is not visible in this extraction.
 */
329 ENTRY(casuword32)
330 movq PCPU(curthread),%rcx
331 movq TD_PCB(%rcx), %rcx
332 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
333 movq %rsp,PCB_ONFAULT_SP(%rcx)
335 movq $VM_MAX_USER_ADDRESS-4,%rax
336 cmpq %rax,%rdi /* verify address is valid */
337 ja fusufault
339 movl %esi,%eax /* old */
340 lock
341 cmpxchgl %edx,(%rdi) /* new = %edx */
/*
 * The old value is in %eax.  If the store succeeded it will be the
 * value we expected (old) from before the store, otherwise it will
 * be the current value.
 */
349 movq PCPU(curthread),%rcx
350 movq TD_PCB(%rcx), %rcx
351 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
355 * casuword. Compare and set user word. Returns -1 or the current value.
356 * dst = %rdi, old = %rsi, new = %rdx
/*
 * casuword(dst, old, new) — %rdi, %rsi, %rdx.
 * 64-bit variant of casuword32: atomic compare-and-set of a user word.
 * Returns the observed value in %rax, or -1 via fusufault on fault.
 * NOTE(review): the final 'ret' is not visible in this extraction.
 */
358 ENTRY(casuword)
359 movq PCPU(curthread),%rcx
360 movq TD_PCB(%rcx), %rcx
361 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
362 movq %rsp,PCB_ONFAULT_SP(%rcx)
364 movq $VM_MAX_USER_ADDRESS-8,%rax
365 cmpq %rax,%rdi /* verify address is valid */
366 ja fusufault
368 movq %rsi,%rax /* old */
369 lock
370 cmpxchgq %rdx,(%rdi) /* new = %rdx */
/*
 * The old value is in %rax.  If the store succeeded it will be the
 * value we expected (old) from before the store, otherwise it will
 * be the current value.
 */
378 movq PCPU(curthread),%rcx
379 movq TD_PCB(%rcx), %rcx
380 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
384 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
385 * byte from user memory. All these functions are MPSAFE.
386 * addr = %rdi
/*
 * fuword64 / std_fuword(addr) — %rdi.
 * Fetch a 64-bit word from user memory; returns the word in %rax or
 * -1 via fusufault.  NOTE(review): 'ret' not visible in extraction.
 */
389 ALTENTRY(fuword64)
390 ENTRY(std_fuword)
391 movq PCPU(curthread),%rcx
392 movq TD_PCB(%rcx), %rcx
393 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
394 movq %rsp,PCB_ONFAULT_SP(%rcx)
396 movq $VM_MAX_USER_ADDRESS-8,%rax
397 cmpq %rax,%rdi /* verify address is valid */
398 ja fusufault
400 movq (%rdi),%rax /* fetch the word */
401 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
/*
 * fuword32(addr) — %rdi.  Fetch a 32-bit user word; returns it
 * zero-extended in %rax, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
404 ENTRY(fuword32)
405 movq PCPU(curthread),%rcx
406 movq TD_PCB(%rcx), %rcx
407 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
408 movq %rsp,PCB_ONFAULT_SP(%rcx)
410 movq $VM_MAX_USER_ADDRESS-4,%rax
411 cmpq %rax,%rdi /* verify address is valid */
412 ja fusufault
414 movl (%rdi),%eax /* 32-bit load zero-extends into %rax */
415 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
419 * fuswintr() and suswintr() are specialized variants of fuword16() and
420 * suword16(), respectively. They are called from the profiling code,
421 * potentially at interrupt time. If they fail, that's okay; good things
422 * will happen later. They always fail for now, until the trap code is
423 * able to deal with this.
/*
 * suswintr / fuswintr — interrupt-time variants of suword16/fuword16.
 * Deliberately always fail (return -1) until the trap code can handle
 * faults at interrupt time; callers treat failure as best-effort.
 * NOTE(review): 'ret' not visible in extraction.
 */
425 ALTENTRY(suswintr)
426 ENTRY(fuswintr)
427 movq $-1,%rax
/*
 * fuword16(addr) — %rdi.  Fetch a 16-bit user word; returns it
 * zero-extended in %rax, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
430 ENTRY(fuword16)
431 movq PCPU(curthread),%rcx
432 movq TD_PCB(%rcx), %rcx
433 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
434 movq %rsp,PCB_ONFAULT_SP(%rcx)
436 movq $VM_MAX_USER_ADDRESS-2,%rax
437 cmpq %rax,%rdi /* verify address is valid */
438 ja fusufault
440 movzwl (%rdi),%eax /* zero-extend u16 -> %rax */
441 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
/*
 * std_fubyte(addr) — %rdi.  Fetch one byte from user memory; returns
 * it zero-extended in %rax, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
444 ENTRY(std_fubyte)
445 movq PCPU(curthread),%rcx
446 movq TD_PCB(%rcx), %rcx
447 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
448 movq %rsp,PCB_ONFAULT_SP(%rcx)
450 movq $VM_MAX_USER_ADDRESS-1,%rax
451 cmpq %rax,%rdi /* verify address is valid */
452 ja fusufault
454 movzbl (%rdi),%eax /* zero-extend u8 -> %rax */
455 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
/*
 * fusufault — shared fault target for the fu*/su* family.
 * Disarms onfault and returns -1 (%rax = 0 then decremented).
 * NOTE(review): 'ret' not visible in extraction.
 */
458 ALIGN_TEXT
459 fusufault:
460 movq PCPU(curthread),%rcx
461 xorl %eax,%eax
462 movq TD_PCB(%rcx), %rcx
463 movq %rax,PCB_ONFAULT(%rcx) /* disarm fault recovery */
464 decq %rax /* %rax = -1 */
468 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
469 * user memory. All these functions are MPSAFE.
471 * addr = %rdi, value = %rsi
473 * Write a long
/*
 * suword64 / std_suword(addr, value) — %rdi, %rsi.
 * Store a 64-bit word to user memory; returns 0, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
475 ALTENTRY(suword64)
476 ENTRY(std_suword)
477 movq PCPU(curthread),%rcx
478 movq TD_PCB(%rcx), %rcx
479 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
480 movq %rsp,PCB_ONFAULT_SP(%rcx)
482 movq $VM_MAX_USER_ADDRESS-8,%rax
483 cmpq %rax,%rdi /* verify address validity */
484 ja fusufault
486 movq %rsi,(%rdi)
487 xorl %eax,%eax /* return 0 */
488 movq PCPU(curthread),%rcx
489 movq TD_PCB(%rcx), %rcx
490 movq %rax,PCB_ONFAULT(%rcx) /* disarm fault recovery */
494 * Write an int
/*
 * std_suword32(addr, value) — %rdi, %rsi(32).
 * Store a 32-bit word to user memory; returns 0, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
496 ENTRY(std_suword32)
497 movq PCPU(curthread),%rcx
498 movq TD_PCB(%rcx), %rcx
499 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
500 movq %rsp,PCB_ONFAULT_SP(%rcx)
502 movq $VM_MAX_USER_ADDRESS-4,%rax
503 cmpq %rax,%rdi /* verify address validity */
504 ja fusufault
506 movl %esi,(%rdi)
507 xorl %eax,%eax /* return 0 */
508 movq PCPU(curthread),%rcx
509 movq TD_PCB(%rcx), %rcx
510 movq %rax,PCB_ONFAULT(%rcx) /* disarm fault recovery */
/*
 * suword16(addr, value) — %rdi, %rsi(16).
 * Store a 16-bit word to user memory; returns 0, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
513 ENTRY(suword16)
514 movq PCPU(curthread),%rcx
515 movq TD_PCB(%rcx), %rcx
516 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
517 movq %rsp,PCB_ONFAULT_SP(%rcx)
519 movq $VM_MAX_USER_ADDRESS-2,%rax
520 cmpq %rax,%rdi /* verify address validity */
521 ja fusufault
523 movw %si,(%rdi)
524 xorl %eax,%eax /* return 0 */
525 movq PCPU(curthread),%rcx /* restore trashed register */
526 movq TD_PCB(%rcx), %rcx
527 movq %rax,PCB_ONFAULT(%rcx) /* disarm fault recovery */
/*
 * std_subyte(addr, value) — %rdi, %rsi(8).
 * Store one byte to user memory; returns 0, or -1 via fusufault.
 * NOTE(review): 'ret' not visible in extraction.
 */
530 ENTRY(std_subyte)
531 movq PCPU(curthread),%rcx
532 movq TD_PCB(%rcx), %rcx
533 movq $fusufault,PCB_ONFAULT(%rcx) /* arm fault recovery */
534 movq %rsp,PCB_ONFAULT_SP(%rcx)
536 movq $VM_MAX_USER_ADDRESS-1,%rax
537 cmpq %rax,%rdi /* verify address validity */
538 ja fusufault
540 movl %esi,%eax
541 movb %al,(%rdi)
542 xorl %eax,%eax /* return 0 */
543 movq PCPU(curthread),%rcx /* restore trashed register */
544 movq TD_PCB(%rcx), %rcx
545 movq %rax,PCB_ONFAULT(%rcx) /* disarm fault recovery */
549 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
550 * %rdi, %rsi, %rdx, %rcx
552 * copy a string from from to to, stop when a 0 character is reached.
553 * return ENAMETOOLONG if string is longer than maxlen, and
554 * EFAULT on protection violations. If lencopied is non-zero,
555 * return the actual length in *lencopied.
/*
 * std_copyinstr(from, to, maxlen, int *lencopied) — %rdi,%rsi,%rdx,%rcx.
 * Copy a NUL-terminated string from user space, stopping at the NUL or
 * after maxlen bytes.  Returns 0 on success, ENAMETOOLONG if maxlen was
 * exhausted, EFAULT on a fault (via onfault -> cpystrflt).  If lencopied
 * is non-NULL, *lencopied = bytes copied (maxlen - remaining).
 * Register roles: %r8 = original maxlen, %r9 = lencopied pointer,
 * %rdx = remaining count (counts down).
 * NOTE(review): several local labels ('1:', '2:', '3:') and the final
 * 'ret' are not visible in this extraction — dropped lines.
 */
557 ENTRY(std_copyinstr)
558 movq %rdx,%r8 /* %r8 = maxlen */
559 movq %rcx,%r9 /* %r9 = *len */
560 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
561 movq PCPU(curthread),%rcx
562 movq TD_PCB(%rcx), %rcx
563 movq $cpystrflt,PCB_ONFAULT(%rcx) /* arm fault recovery */
564 movq %rsp,PCB_ONFAULT_SP(%rcx)
566 movq $VM_MAX_USER_ADDRESS,%rax
568 /* make sure 'from' is within bounds */
569 subq %rsi,%rax
570 jbe cpystrflt
572 /* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
573 cmpq %rdx,%rax
574 jae 1f
575 movq %rax,%rdx /* clamp remaining count */
576 movq %rax,%r8 /* and the reported maxlen */
578 incq %rdx /* pre-increment: loop decrements first */
582 decq %rdx
583 jz 3f /* maxlen exhausted */
585 lodsb
586 stosb
587 orb %al,%al /* NUL terminator? */
588 jnz 2b
590 /* Success -- 0 byte reached */
591 decq %rdx /* adjust so maxlen - rdx = bytes copied */
592 xorl %eax,%eax /* return 0 */
593 jmp cpystrflt_x
595 /* rdx is zero - return ENAMETOOLONG or EFAULT */
596 movq $VM_MAX_USER_ADDRESS,%rax
597 cmpq %rax,%rsi
598 jae cpystrflt /* ran off the end of user space */
600 movq $ENAMETOOLONG,%rax
601 jmp cpystrflt_x
603 cpystrflt:
604 movq $EFAULT,%rax
606 cpystrflt_x:
607 /* set *lencopied and return %eax */
608 movq PCPU(curthread),%rcx
609 movq TD_PCB(%rcx), %rcx
610 movq $0,PCB_ONFAULT(%rcx) /* disarm fault recovery */
612 testq %r9,%r9
613 jz 1f /* no lencopied pointer */
614 subq %rdx,%r8 /* bytes copied = maxlen - remaining */
615 movq %r8,(%r9)
621 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
622 * %rdi, %rsi, %rdx, %rcx
/*
 * copystr(from, to, maxlen, int *lencopied) — %rdi,%rsi,%rdx,%rcx.
 * Kernel-to-kernel string copy, same contract as copyinstr but with no
 * user-address validation or fault recovery.  Returns 0 or ENAMETOOLONG;
 * if lencopied (%rcx) is non-NULL, stores bytes copied through it.
 * NOTE(review): local labels and the final 'ret' are not visible in
 * this extraction — dropped lines.
 */
624 ENTRY(copystr)
625 movq %rdx,%r8 /* %r8 = maxlen */
627 xchgq %rdi,%rsi /* %rsi = from, %rdi = to */
628 incq %rdx /* pre-increment: loop decrements first */
631 decq %rdx
632 jz 4f /* maxlen exhausted */
633 lodsb
634 stosb
635 orb %al,%al /* NUL terminator? */
636 jnz 1b
638 /* Success -- 0 byte reached */
639 decq %rdx /* adjust so maxlen - rdx = bytes copied */
640 xorl %eax,%eax /* return 0 */
641 jmp 6f
643 /* rdx is zero -- return ENAMETOOLONG */
644 movq $ENAMETOOLONG,%rax
648 testq %rcx,%rcx
649 jz 7f /* no lencopied pointer */
650 /* set *lencopied and return %rax */
651 subq %rdx,%r8
652 movq %r8,(%rcx)
657 * Handling of special x86_64 registers and descriptor tables etc
658 * %rdi
660 /* void lgdt(struct region_descriptor *rdp); */
661 ENTRY(lgdt)
662 /* reload the descriptor table */
663 lgdt (%rdi)
665 /* flush the prefetch q */
666 jmp 1f
669 movl $KDSEL,%eax
670 movl %eax,%ds
671 movl %eax,%es
672 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
673 movl %eax,%gs /* Beware, use wrmsr to set 64 bit base */
674 movl %eax,%ss
676 /* reload code selector by turning return into intersegmental return */
677 popq %rax
678 pushq $KCSEL
679 pushq %rax
680 MEXITCOUNT
681 lretq
683 /*****************************************************************************/
684 /* setjmp, longjmp */
685 /*****************************************************************************/
/*
 * setjmp(buf) — %rdi.  Save the SysV callee-saved registers, the stack
 * pointer and the return address into the 8-slot jmp_buf; returns 0.
 * Paired with longjmp below, which restores the same slots.
 * NOTE(review): 'ret' not visible in extraction.
 */
687 ENTRY(setjmp)
688 movq %rbx,0(%rdi) /* save rbx */
689 movq %rsp,8(%rdi) /* save rsp */
690 movq %rbp,16(%rdi) /* save rbp */
691 movq %r12,24(%rdi) /* save r12 */
692 movq %r13,32(%rdi) /* save r13 */
693 movq %r14,40(%rdi) /* save r14 */
694 movq %r15,48(%rdi) /* save r15 */
695 movq 0(%rsp),%rdx /* get rta */
696 movq %rdx,56(%rdi) /* save rip */
697 xorl %eax,%eax /* return(0); */
/*
 * longjmp(buf) — %rdi.  Restore the registers saved by setjmp, patch
 * the saved rip into the current return frame, and return 1 so the
 * resumed setjmp call site sees a non-zero result.
 * NOTE(review): 'ret' not visible in extraction.
 */
700 ENTRY(longjmp)
701 movq 0(%rdi),%rbx /* restore rbx */
702 movq 8(%rdi),%rsp /* restore rsp */
703 movq 16(%rdi),%rbp /* restore rbp */
704 movq 24(%rdi),%r12 /* restore r12 */
705 movq 32(%rdi),%r13 /* restore r13 */
706 movq 40(%rdi),%r14 /* restore r14 */
707 movq 48(%rdi),%r15 /* restore r15 */
708 movq 56(%rdi),%rdx /* get rta */
709 movq %rdx,0(%rsp) /* put in return frame */
710 xorl %eax,%eax /* return(1); */
711 incl %eax
715 * Support for reading MSRs in the safe manner.
/*
 * int rdmsr_safe(u_int msr, uint64_t *data) — %edi = msr, %rsi = data.
 * Read an MSR with fault recovery armed: a #GP on a bad MSR vectors to
 * msr_onfault, which returns EFAULT.  On success stores edx:eax
 * combined into *data and returns 0.
 * NOTE(review): 'ret' not visible in extraction.
 */
717 ENTRY(rdmsr_safe)
718 /* int rdmsr_safe(u_int msr, uint64_t *data) */
719 movq PCPU(curthread),%r8
720 movq TD_PCB(%r8), %r8
721 movq $msr_onfault,PCB_ONFAULT(%r8) /* arm fault recovery */
722 movq %rsp,PCB_ONFAULT_SP(%r8)
723 movl %edi,%ecx /* %ecx = MSR number for rdmsr */
724 rdmsr /* Read MSR pointed by %ecx. Returns
725 hi byte in edx, lo in %eax */
726 salq $32,%rdx /* sign-shift %rdx left */
727 movl %eax,%eax /* zero-extend %eax -> %rax */
728 orq %rdx,%rax /* %rax = full 64-bit MSR value */
729 movq %rax,(%rsi) /* *data = value */
730 xorq %rax,%rax /* return 0 */
731 movq %rax,PCB_ONFAULT(%r8) /* disarm fault recovery */
735 * Support for writing MSRs in the safe manner.
/*
 * int wrmsr_safe(u_int msr, uint64_t data) — %edi = msr, %rsi = data.
 * Write an MSR with fault recovery armed: a #GP on a bad MSR/value
 * vectors to msr_onfault (returns EFAULT); on success returns 0.
 *
 * FIX: the onfault stack pointer was stored through %rcx, which holds
 * garbage at that point — %ecx is loaded with the MSR number only on
 * the next line.  The PCB pointer is in %r8, matching rdmsr_safe and
 * every other PCB_ONFAULT_SP user in this file.
 */
737 ENTRY(wrmsr_safe)
738 /* int wrmsr_safe(u_int msr, uint64_t data) */
739 movq PCPU(curthread),%r8
740 movq TD_PCB(%r8), %r8
741 movq $msr_onfault,PCB_ONFAULT(%r8) /* arm fault recovery */
742 movq %rsp,PCB_ONFAULT_SP(%r8) /* was %rcx: uninitialized */
743 movl %edi,%ecx /* %ecx = MSR number */
744 movl %esi,%eax /* low 32 bits of data */
745 sarq $32,%rsi
746 movl %esi,%edx /* high 32 bits of data */
747 wrmsr /* Write MSR pointed by %ecx. Accepts
748 hi byte in edx, lo in %eax. */
749 xorq %rax,%rax /* return 0 */
750 movq %rax,PCB_ONFAULT(%r8) /* disarm fault recovery */
754 * MSR operations fault handler
/*
 * Fault target for rdmsr_safe/wrmsr_safe: disarm onfault and
 * return EFAULT to the caller.
 * NOTE(review): 'ret' not visible in extraction.
 */
756 ALIGN_TEXT
757 msr_onfault:
758 movq PCPU(curthread),%r8
759 movq TD_PCB(%r8), %r8
760 movq $0,PCB_ONFAULT(%r8) /* disarm fault recovery */
761 movl $EFAULT,%eax
765 * Support for BB-profiling (gcc -a). The kernbb program will extract
766 * the data from the kernel.
/*
 * BB-profiling support (gcc -a): bbhead is the list head of basic-block
 * profile records; __bb_init_func marks a record initialized (slot 0 = 1)
 * and pushes it onto the bbhead singly-linked list (link at offset 32).
 */
769 .data
770 ALIGN_DATA
771 .globl bbhead
772 bbhead:
773 .quad 0
775 .text
776 NON_GPROF_ENTRY(__bb_init_func)
777 movq $1,(%rdi) /* mark record initialized */
778 movq bbhead,%rax
779 movq %rax,32(%rdi) /* record->link = bbhead */
780 movq %rdi,bbhead /* bbhead = record */
781 NON_GPROF_RET