/*
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/libkern/mcount.c,v 1.16 1999/12/29 04:54:41 peter Exp $
 */
#include <sys/param.h>
#include <sys/gmon.h>
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
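/*
 * Illustration only, not part of the original file: the general shape
 * of the machine-dependent glue that <machine/profile.h> supplies.
 * This sketch assumes a compiler providing __builtin_return_address()
 * and frame pointers; real ports typically do the equivalent in
 * assembly so the stub itself does not disturb the frame.
 */
#if 0	/* sketch, compiled out */
void
mcount(void)
{
	uintfptr_t frompc, selfpc;

	/* our own return address: a pc inside the profiled callee */
	selfpc = (uintfptr_t)__builtin_return_address(0);
	/* one frame up: the call site in the caller */
	frompc = (uintfptr_t)__builtin_return_address(1);
	_mcount(frompc, selfpc);
}
#endif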
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 *
	 * XXX doesn't work properly with vkernel
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAX_USER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif
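	/*
	 * Hypothetical illustration: with kernel text starting at lowpc =
	 * 0xc0100000 and a trap taken at user pc 0x0804f000, the unsigned
	 * subtraction frompc - lowpc wraps to a value far beyond textsize.
	 * Adding lowpc back recovers the user pc, which lies below the top
	 * of user space, so the sample is redirected to the index of
	 * user() and all user-mode time collapses onto one graph node.
	 */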
#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */
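	/*
	 * Worked example with made-up numbers: suppose the previous hook
	 * left cputime_bias = 40 cycles (its post-overhead), cputime() now
	 * reports 1000 cycles since its last call, and mcount_pre_overhead
	 * is 60.  Then delta = 1000 - 40 - 60 = 900 cycles is charged to
	 * frompc's bucket, while the calibrated costs of cputime() and
	 * mcount() themselves accumulate separately into *p->cputime_count
	 * and *p->mcount_count.
	 */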
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif
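	/*
	 * Illustration: if a device interrupt is taken at some arbitrary
	 * kernel pc and vectors to a handler whose entry lies in the
	 * bintr()..eintr() range, the arc recorded below becomes
	 * bintr() -> handler rather than <interrupted pc> -> handler, so
	 * gprof shows interrupt handlers under one synthetic parent
	 * instead of scattered across every interruptible instruction.
	 */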
	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
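	/*
	 * Worked example with assumed parameters: if p->hashfraction is 2
	 * and froms[] entries are 2-byte u_shorts, each slot covers 4
	 * bytes of text, so a call site at lowpc + 0x123 hashes to
	 * froms[0x123 / 4] = froms[72].  Distinct call sites may share a
	 * slot; the chain walked below distinguishes arcs by selfpc, which
	 * is why HASHFRACTION is tied to the minimum spacing between call
	 * instructions.
	 */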
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
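	/*
	 * Note on the relinking above: it is a move-to-front heuristic.
	 * The arc just counted becomes the head of its hash chain, so a
	 * caller that keeps invoking the same callee hits the head compare
	 * on the next call instead of walking the chain again.
	 */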
done:
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
	MCOUNT_EXIT(s);
#endif
	return;
}
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT
#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		int delta;

		/*
		 * Count the time since cputime() was previously called
		 * against `selfpc'.  Compensate for overheads.
		 */
		delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
		cputime_bias = p->mexitcount_post_overhead;
		KCOUNT(p, selfpcdiff) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mexitcount_count += p->mexitcount_overhead;
	}
}
void
empty_loop(void)
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}
void
nullfunc(void)
{
}

void
nullfunc_loop(void)
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
#endif /* GUPROF */
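/*
 * Illustration only, not part of the original file: how a calibrator
 * might use the two loops above.  Assuming both are compiled with
 * profiling enabled and that cputime() returns the time since its
 * previous call, the difference between the two loop times divided by
 * CALIB_SCALE approximates the per-call cost of the profiling hooks
 * wrapped around nullfunc():
 */
#if 0	/* sketch, compiled out */
static int
profiling_overhead_per_call(void)
{
	int empty_time, null_time;

	cputime();		/* discard: restart the interval counter */
	empty_loop();
	empty_time = cputime();	/* cost of the bare loop */
	nullfunc_loop();
	null_time = cputime();	/* loop plus CALIB_SCALE hooked calls */
	return ((null_time - empty_time) / CALIB_SCALE);
}
#endif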