/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2017 Joyent, Inc.
 */

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

extern uintptr_t kernelbase;

int	dtrace_ustackdepth_max = 2048;
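
/*
 * Capture a kernel-level stack trace.  Saved frame pointers are chased from
 * the current frame up to the top of the thread (or interrupt) stack; the
 * artificial frames introduced by the DTrace machinery itself are skipped
 * via aframes, and any unused pcstack slots are zero-filled before returning.
 */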
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != (uintptr_t)NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = (uintptr_t)NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}
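
/*
 * Common user-level stack walker used by dtrace_getupcstack() and
 * dtrace_getustackdepth().  Starting from the supplied pc/sp pair it chases
 * saved frame pointers, detouring through any signal ucontext it encounters,
 * and returns the number of frames visited.  Iteration is bounded by
 * dtrace_ustackdepth_max to guard against circular stacks.
 */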
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}
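
/*
 * Capture a user-level stack trace for the current thread.  The first slot
 * holds the pid; the remaining frames are filled in by
 * dtrace_getustack_common(), and any leftover slots are zeroed.
 */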
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n;

	ASSERT(DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT));

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = (uintptr_t)NULL;
}
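
/*
 * Return the depth of the current thread's user-level stack without
 * recording any program counters.
 */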
int
dtrace_getustackdepth(void)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp;
	int n = 0;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = rp->r_pc;
	sp = rp->r_fp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
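
/*
 * Like dtrace_getupcstack(), but records the frame pointer alongside each
 * program counter so consumers can associate each frame with its stack base.
 */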
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = (uintptr_t)NULL;
}
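
/*
 * Fetch probe argument 'arg', either from the register set that was pushed
 * when an invalid-opcode trap brought us into dtrace_invop(), or directly
 * from the caller's stack frame when the provider called dtrace_probe()
 * itself.
 */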
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, the calling RIP, and
			 * padding stored in dtrace_invop().  If the argument
			 * that we're seeking is passed on the stack, we'll
			 * pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t) * 2);

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
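
/*
 * Return the kernel stack depth at the time of the probe firing, excluding
 * the artificial frames accounted for by aframes.
 */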
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}
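
/*
 * On amd64, the user-visible register indices retain their historical i386
 * ordering (GS through SS); dtrace_regmap[] translates each of those indices
 * to the corresponding amd64 REG_* identifier.
 */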
#if defined(__amd64)
static const int dtrace_regmap[] = {
	REG_GS,		/* GS */
	REG_FS,		/* FS */
	REG_ES,		/* ES */
	REG_DS,		/* DS */
	REG_RDI,	/* EDI */
	REG_RSI,	/* ESI */
	REG_RBP,	/* EBP */
	REG_RSP,	/* ESP */
	REG_RBX,	/* EBX */
	REG_RDX,	/* EDX */
	REG_RCX,	/* ECX */
	REG_RAX,	/* EAX */
	REG_TRAPNO,	/* TRAPNO */
	REG_ERR,	/* ERR */
	REG_RIP,	/* EIP */
	REG_CS,		/* CS */
	REG_RFL,	/* EFL */
	REG_RSP,	/* UESP */
	REG_SS		/* SS */
};
#endif
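
/*
 * Read register 'reg' from the saved register set.  Indices at or below SS
 * are translated through dtrace_regmap[]; indices above SS are rebased past
 * the i386 set before being matched against the native REG_* values.
 */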
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	if (reg <= SS) {
		if (reg >= sizeof (dtrace_regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}
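
/*
 * Write 'val' into register 'reg' of the saved register set, using the same
 * index translation as dtrace_getreg().  Only a subset of registers may be
 * written; anything else sets CPU_DTRACE_ILLOP.
 */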
void
dtrace_setreg(struct regs *rp, uint_t reg, ulong_t val)
{
#if defined(__amd64)
	if (reg <= SS) {
		ASSERT(reg < (sizeof (dtrace_regmap) / sizeof (int)));

		reg = dtrace_regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		rp->r_rdi = val;
		break;
	case REG_RSI:
		rp->r_rsi = val;
		break;
	case REG_RDX:
		rp->r_rdx = val;
		break;
	case REG_RCX:
		rp->r_rcx = val;
		break;
	case REG_R8:
		rp->r_r8 = val;
		break;
	case REG_R9:
		rp->r_r9 = val;
		break;
	case REG_RAX:
		rp->r_rax = val;
		break;
	case REG_RBX:
		rp->r_rbx = val;
		break;
	case REG_RBP:
		rp->r_rbp = val;
		break;
	case REG_R10:
		rp->r_r10 = val;
		break;
	case REG_R11:
		rp->r_r11 = val;
		break;
	case REG_R12:
		rp->r_r12 = val;
		break;
	case REG_R13:
		rp->r_r13 = val;
		break;
	case REG_R14:
		rp->r_r14 = val;
		break;
	case REG_R15:
		rp->r_r15 = val;
		break;
	case REG_RSP:
		rp->r_rsp = val;
		break;
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return;
	}

#else /* defined(__amd64) */
	switch (reg) {
	case EAX:
		rp->r_eax = val;
		break;
	case ECX:
		rp->r_ecx = val;
		break;
	case EDX:
		rp->r_edx = val;
		break;
	case EBX:
		rp->r_ebx = val;
		break;
	case ESP:
		rp->r_esp = val;
		break;
	case EBP:
		rp->r_ebp = val;
		break;
	case ESI:
		rp->r_esi = val;
		break;
	case EDI:
		rp->r_edi = val;
		break;
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return;
	}

#endif /* defined(__amd64) */
}
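
/*
 * Validate a user/kernel address pair before a copy: the kernel address must
 * lie above kernelbase (asserted), and the user range must fall entirely
 * below kernelbase without wrapping.
 */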
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}
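
/*
 * Thin wrappers around dtrace_copy() and dtrace_copystr() that validate the
 * addresses with dtrace_copycheck() before touching user memory.
 */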
/*ARGSUSED*/
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

/*ARGSUSED*/
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}
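
/*
 * Fetch an 8-, 16-, 32- or 64-bit quantity from user space.  Addresses at or
 * above _userlimit are rejected with CPU_DTRACE_BADADDR rather than being
 * dereferenced.
 */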
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}