/* sys/libkern/mcount.c — kernel call-graph profiling (mcount/mexitcount). */
/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/libkern/mcount.c,v 1.16 1999/12/29 04:54:41 peter Exp $
 */
#include <sys/param.h>
#include <sys/gmon.h>
#if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
/*
 * Kernel-only assembly entry labels used below to re-attribute samples
 * taken inside trap/interrupt handlers (and user-mode addresses) to a
 * single representative function each.
 */
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
/* _mcount; may be static, inline, etc */
63 _MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
65 #ifdef GUPROF
66 int delta;
67 #endif
68 fptrdiff_t frompci;
69 u_short *frompcindex;
70 struct tostruct *top, *prevtop;
71 struct gmonparam *p;
72 long toindex;
73 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
74 MCOUNT_DECL(s)
75 #endif
77 p = &_gmonparam;
78 #ifndef GUPROF /* XXX */
80 * check that we are profiling
81 * and that we aren't recursively invoked.
83 if (p->state != GMON_PROF_ON)
84 return;
85 #endif
86 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
87 MCOUNT_ENTER(s);
88 #else
89 p->state = GMON_PROF_BUSY;
90 #endif
91 frompci = frompc - p->lowpc;
93 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
95 * When we are called from an exception handler, frompci may be
96 * for a user address. Convert such frompci's to the index of
97 * user() to merge all user counts.
99 * XXX doesn't work properly with vkernel
101 if (frompci >= p->textsize) {
102 if (frompci + p->lowpc
103 >= (uintfptr_t)(VM_MAX_USER_ADDRESS + UPAGES * PAGE_SIZE))
104 goto done;
105 frompci = (uintfptr_t)user - p->lowpc;
106 if (frompci >= p->textsize)
107 goto done;
109 #endif
111 #ifdef GUPROF
112 if (p->state == GMON_PROF_HIRES) {
114 * Count the time since cputime() was previously called
115 * against `frompc'. Compensate for overheads.
117 * cputime() sets its prev_count variable to the count when
118 * it is called. This in effect starts a counter for
119 * the next period of execution (normally from now until
120 * the next call to mcount() or mexitcount()). We set
121 * cputime_bias to compensate for our own overhead.
123 * We use the usual sampling counters since they can be
124 * located efficiently. 4-byte counters are usually
125 * necessary. gprof will add up the scattered counts
126 * just like it does for statistical profiling. All
127 * counts are signed so that underflow in the subtractions
128 * doesn't matter much (negative counts are normally
129 * compensated for by larger counts elsewhere). Underflow
130 * shouldn't occur, but may be caused by slightly wrong
131 * calibrations or from not clearing cputime_bias.
133 delta = cputime() - cputime_bias - p->mcount_pre_overhead;
134 cputime_bias = p->mcount_post_overhead;
135 KCOUNT(p, frompci) += delta;
136 *p->cputime_count += p->cputime_overhead;
137 *p->mcount_count += p->mcount_overhead;
139 #endif /* GUPROF */
141 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
143 * When we are called from an exception handler, frompc is faked
144 * to be for where the exception occurred. We've just solidified
145 * the count for there. Now convert frompci to the index of btrap()
146 * for trap handlers and bintr() for interrupt handlers to make
147 * exceptions appear in the call graph as calls from btrap() and
148 * bintr() instead of calls from all over.
150 if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
151 && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
152 if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
153 frompci = (uintfptr_t)bintr - p->lowpc;
154 else
155 frompci = (uintfptr_t)btrap - p->lowpc;
157 #endif
160 * check that frompc is a reasonable pc value.
161 * for example: signal catchers get called from the stack,
162 * not from text space. too bad.
164 if (frompci >= p->textsize)
165 goto done;
167 frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
168 toindex = *frompcindex;
169 if (toindex == 0) {
171 * first time traversing this arc
173 toindex = ++p->tos[0].link;
174 if (toindex >= p->tolimit)
175 /* halt further profiling */
176 goto overflow;
178 *frompcindex = toindex;
179 top = &p->tos[toindex];
180 top->selfpc = selfpc;
181 top->count = 1;
182 top->link = 0;
183 goto done;
185 top = &p->tos[toindex];
186 if (top->selfpc == selfpc) {
188 * arc at front of chain; usual case.
190 top->count++;
191 goto done;
194 * have to go looking down chain for it.
195 * top points to what we are looking at,
196 * prevtop points to previous top.
197 * we know it is not at the head of the chain.
199 for (; /* goto done */; ) {
200 if (top->link == 0) {
202 * top is end of the chain and none of the chain
203 * had top->selfpc == selfpc.
204 * so we allocate a new tostruct
205 * and link it to the head of the chain.
207 toindex = ++p->tos[0].link;
208 if (toindex >= p->tolimit)
209 goto overflow;
211 top = &p->tos[toindex];
212 top->selfpc = selfpc;
213 top->count = 1;
214 top->link = *frompcindex;
215 *frompcindex = toindex;
216 goto done;
219 * otherwise, check the next arc on the chain.
221 prevtop = top;
222 top = &p->tos[top->link];
223 if (top->selfpc == selfpc) {
225 * there it is.
226 * increment its count
227 * move it to the head of the chain.
229 top->count++;
230 toindex = prevtop->link;
231 prevtop->link = top->link;
232 top->link = *frompcindex;
233 *frompcindex = toindex;
234 goto done;
238 done:
239 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
240 MCOUNT_EXIT(s);
241 #else
242 p->state = GMON_PROF_ON;
243 #endif
244 return;
245 overflow:
246 p->state = GMON_PROF_ERROR;
247 #if defined(_KERNEL) && !defined(_KERNEL_VIRTUAL)
248 MCOUNT_EXIT(s);
249 #endif
250 return;
254 * Actual definition of mcount function. Defined in <machine/profile.h>,
255 * which is included by <sys/gmon.h>.
257 MCOUNT
259 #ifdef GUPROF
260 void
261 mexitcount(uintfptr_t selfpc)
263 struct gmonparam *p;
264 uintfptr_t selfpcdiff;
266 p = &_gmonparam;
267 selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
268 if (selfpcdiff < p->textsize) {
269 int delta;
272 * Count the time since cputime() was previously called
273 * against `selfpc'. Compensate for overheads.
275 delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
276 cputime_bias = p->mexitcount_post_overhead;
277 KCOUNT(p, selfpcdiff) += delta;
278 *p->cputime_count += p->cputime_overhead;
279 *p->mexitcount_count += p->mexitcount_overhead;
283 void
284 empty_loop(void)
286 int i;
288 for (i = 0; i < CALIB_SCALE; i++)
/*
 * Calibration helper: a do-nothing function, used to measure pure
 * call/return (and mcount/mexitcount) overhead.
 */
void
nullfunc(void)
{
}
297 void
298 nullfunc_loop(void)
300 int i;
302 for (i = 0; i < CALIB_SCALE; i++)
303 nullfunc();
305 #endif /* GUPROF */