yellow go bye bye
[kugel-rb.git] / firmware / profile.c
blob21feb13d26b0d60039a5ea55f55efb7542eb9efd
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Profiling routines counts ticks and calls to each profiled function.
12 * Copyright (C) 2005 by Brandon Low
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
22 ****************************************************************************
24 * profile_func_enter() based on mcount found in gmon.c:
26 ***************************************************************************
27 * Copyright (c) 1991, 1998 The Regents of the University of California.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. [rescinded 22 July 1999]
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 * @(#)gmon.c 5.3 (Berkeley) 5/22/91
57 #include <file.h>
58 #include <system.h>
59 #include <string.h>
60 #include <timer.h>
61 #include "inttypes.h"
62 #include "profile.h"
/* PFD is Profiled Function Data */

/* Indices are shorts which means that we use 4k of RAM */
#define INDEX_BITS 11    /* What is a reasonable size for this? */
#define INDEX_SIZE 2048  /* 2 ^ INDEX_BITS */
#define INDEX_MASK 0x7FF /* lower INDEX_BITS 1 */

/*
 * In the current setup (pfd has 4 longs and 2 shorts) this uses 20k of RAM
 * for profiling, and allows for profiling sections of code with up-to
 * 1024 function caller->callee pairs
 */
#define NUMPFDS 512

/* One record per profiled function (per colliding hash slot chain entry).
 * NOTE(review): the closing brace of this struct was dropped in a previous
 * edit/extraction; restored here. */
struct pfd_struct {
    void *self_pc;             /* address of the profiled function */
    unsigned long count;       /* number of recorded calls */
    unsigned long time;        /* profiler timer ticks charged to it */
    unsigned short link;       /* index of the next pfd in the chain (0 = end) */
    struct pfd_struct *caller; /* pfd of our caller, for time tracking */
};

/* Possible states of profiling */
#define PROF_ON    0x00
#define PROF_BUSY  0x01
#define PROF_ERROR 0x02
#define PROF_OFF   0x03
/* Masks for thread switches */
#define PROF_OFF_THREAD 0x10
#define PROF_ON_THREAD  0x0F

static unsigned short profiling = PROF_OFF;
static size_t recursion_level;
static unsigned short indices[INDEX_SIZE];
static struct pfd_struct pfds[NUMPFDS];
/* This holds a pointer to the last pfd affected for time tracking */
static struct pfd_struct *last_pfd;
/* These are used to track the time when we've lost the CPU so it doesn't count
 * against any of the profiled functions */
static int profiling_thread = -1;

/* internal function prototypes */
static void profile_timer_tick(void);
static void profile_timer_unregister(void);

static void write_function_recursive(int fd, struct pfd_struct *pfd, int depth);

/* Single-instruction increment on Coldfire; plain += elsewhere.
 * Be careful to use the right one for the size of your variable */
#ifdef CPU_COLDFIRE
#define ADDQI_L(_var,_value) \
    asm ("addq.l %[value],%[var];" \
         : [var] "+g" (_var) \
         : [value] "I" (_value) )
#else
#define ADDQI_L(var, value) var += value
#endif
121 void profile_thread_stopped(int current_thread) {
122 if (current_thread == profiling_thread) {
123 /* If profiling is busy or idle */
124 if (profiling < PROF_ERROR) {
125 /* Unregister the timer so that other threads aren't interrupted */
126 timer_unregister();
128 /* Make sure we don't waste time profiling when we're running the
129 * wrong thread */
130 profiling |= PROF_OFF_THREAD;
134 void profile_thread_started(int current_thread) {
135 if (current_thread == profiling_thread) {
136 /* Now we are allowed to profile again */
137 profiling &= PROF_ON_THREAD;
138 /* if profiling was busy or idle */
139 if (profiling < PROF_ERROR) {
140 /* After we de-mask, if profiling is active, reactivate the timer */
141 timer_register(0, profile_timer_unregister,
142 TIMER_FREQ/10000, profile_timer_tick IF_COP(, 0 ) );
147 static void profile_timer_tick(void) {
148 if (!profiling) {
149 register struct pfd_struct *my_last_pfd = last_pfd;
150 if (my_last_pfd) {
151 ADDQI_L(my_last_pfd->time,1);
156 static void profile_timer_unregister(void) {
157 profiling = PROF_ERROR;
158 profstop();
161 /* This function clears the links on top level linkers, and clears the needed
162 * parts of memory in the index array */
163 void profstart(int current_thread) {
164 recursion_level = 0;
165 profiling_thread = current_thread;
166 last_pfd = (struct pfd_struct*)0;
167 pfds[0].link = 0;
168 pfds[0].self_pc = 0;
169 memset(indices,0,INDEX_SIZE * sizeof(unsigned short));
170 timer_register(
171 0, profile_timer_unregister, TIMER_FREQ/10000, profile_timer_tick IF_COP(, 0 ) );
172 profiling = PROF_ON;
175 static void write_function_recursive(int fd, struct pfd_struct *pfd, int depth){
176 unsigned short link = pfd->link;
177 fdprintf(fd,"0x%08lX\t%08ld\t%08ld\t%04d\n", (size_t)pfd->self_pc,
178 pfd->count, pfd->time, depth);
179 if (link > 0 && link < NUMPFDS) {
180 write_function_recursive(fd, &pfds[link], ++depth);
184 void profstop() {
185 int profiling_exit = profiling;
186 int fd = 0;
187 int i;
188 unsigned short current_index;
189 timer_unregister();
190 profiling = PROF_OFF;
191 fd = open("/profile.out", O_WRONLY|O_CREAT|O_TRUNC, 0666);
192 if (profiling_exit == PROF_ERROR) {
193 fdprintf(fd,"Profiling exited with an error.\n");
194 fdprintf(fd,"Overflow or timer stolen most likely.\n");
196 fdprintf(fd,"PROFILE_THREAD\tPFDS_USED\n");
197 fdprintf(fd,"%08d\t%08d\n", profiling_thread,
198 pfds[0].link);
199 fdprintf(fd,"FUNCTION_PC\tCALL_COUNT\tTICKS\t\tDEPTH\n");
200 for (i = 0; i < INDEX_SIZE; i++) {
201 current_index = indices[i];
202 if (current_index != 0) {
203 write_function_recursive(fd, &pfds[current_index], 0);
206 fdprintf(fd,"DEBUG PROFILE DATA FOLLOWS\n");
207 fdprintf(fd,"INDEX\tLOCATION\tSELF_PC\t\tCOUNT\t\tTIME\t\tLINK\tCALLER_IDX\n");
208 for (i = 0; i < NUMPFDS; i++) {
209 struct pfd_struct *my_last_pfd = &pfds[i];
210 if (my_last_pfd->self_pc != 0) {
211 fdprintf(fd,
212 "%04d\t0x%08lX\t0x%08lX\t0x%08lX\t0x%08lX\t%04d\t0x%08lX\n",
213 i, (size_t)my_last_pfd, (size_t)my_last_pfd->self_pc,
214 my_last_pfd->count, my_last_pfd->time, my_last_pfd->link,
215 (size_t)my_last_pfd->caller );
218 fdprintf(fd,"INDEX_ADDRESS=INDEX\n");
219 for (i=0; i < INDEX_SIZE; i++) {
220 fdprintf(fd,"%08lX=%04d\n",(size_t)&indices[i],indices[i]);
222 close(fd);
225 void __cyg_profile_func_exit(void *self_pc, void *call_site) {
226 (void)call_site;
227 (void)self_pc;
228 /* When we started timing, we set the time to the tick at that time
229 * less the time already used in function */
230 if (profiling) {
231 return;
233 profiling = PROF_BUSY;
235 register unsigned short my_recursion_level = recursion_level;
236 if (my_recursion_level) {
237 my_recursion_level--;
238 recursion_level = my_recursion_level;
239 } else {
240 /* This shouldn't be necessary, maybe exit could be called first */
241 register struct pfd_struct *my_last_pfd = last_pfd;
242 if (my_last_pfd) {
243 last_pfd = my_last_pfd->caller;
247 profiling = PROF_ON;
/* Allocate the next free pfd slot; pfds[0].link doubles as the allocation
 * counter.  Jumps to the local 'overflow' label when the table is full;
 * on success leaves 'pfd' pointing at a fresh entry with count = 1 and
 * time = 0.  Relies on 'pfd' and 'self_pc' being in scope, so it is only
 * usable inside __cyg_profile_func_enter(). */
#define ALLOCATE_PFD(temp) \
        temp = ++pfds[0].link;\
        if (temp >= NUMPFDS) goto overflow; \
        pfd = &pfds[temp];\
        pfd->self_pc = self_pc; pfd->count = 1; pfd->time = 0
256 void __cyg_profile_func_enter(void *self_pc, void *from_pc) {
257 struct pfd_struct *pfd;
258 struct pfd_struct *prev_pfd;
259 unsigned short *pfd_index_pointer;
260 unsigned short pfd_index;
262 /* check that we are profiling and that we aren't recursively invoked
263 * this is equivalent to 'if (profiling != PROF_ON)' but it's faster */
264 if (profiling) {
265 return;
267 profiling = PROF_BUSY;
268 /* A check that the PC is in the code range here wouldn't hurt, but this is
269 * logically guaranteed to be a valid address unless the constants are
270 * breaking the rules. */
271 pfd_index_pointer = &indices[((size_t)from_pc)&INDEX_MASK];
272 pfd_index = *pfd_index_pointer;
273 if (pfd_index == 0) {
274 /* new caller, allocate new storage */
275 ALLOCATE_PFD(pfd_index);
276 pfd->link = 0;
277 *pfd_index_pointer = pfd_index;
278 goto done;
280 pfd = &pfds[pfd_index];
281 if (pfd->self_pc == self_pc) {
282 /* only / most recent function called by this caller, usual case */
283 /* increment count, start timing and exit */
284 goto found;
286 /* collision, bad for performance, look down the list of functions called by
287 * colliding PCs */
288 for (; /* goto done */; ) {
289 pfd_index = pfd->link;
290 if (pfd_index == 0) {
291 /* no more previously called functions, allocate a new one */
292 ALLOCATE_PFD(pfd_index);
293 /* this function becomes the new head, link to the old head */
294 pfd->link = *pfd_index_pointer;
295 /* and set the index to point to this function */
296 *pfd_index_pointer = pfd_index;
297 /* start timing and exit */
298 goto done;
300 /* move along the chain */
301 prev_pfd = pfd;
302 pfd = &pfds[pfd_index];
303 if (pfd->self_pc == self_pc) {
304 /* found ourself */
305 /* Remove me from my old spot */
306 prev_pfd->link = pfd->link;
307 /* Link to the old head */
308 pfd->link = *pfd_index_pointer;
309 /* Make me head */
310 *pfd_index_pointer = pfd_index;
311 /* increment count, start timing and exit */
312 goto found;
316 /* We've found a pfd, increment it */
317 found:
318 ADDQI_L(pfd->count,1);
319 /* We've (found or created) and updated our pfd, save it and start timing */
320 done:
322 register struct pfd_struct *my_last_pfd = last_pfd;
323 if (pfd != my_last_pfd) {
324 /* If we are not recursing */
325 pfd->caller = my_last_pfd;
326 last_pfd = pfd;
327 } else {
328 ADDQI_L(recursion_level,1);
331 /* Start timing this function */
332 profiling = PROF_ON;
333 return; /* normal return restores saved registers */
335 overflow:
336 /* this is the same as 'profiling = PROF_ERROR' */
337 profiling = PROF_ERROR;
338 return;