/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/ftrace.h>
/*
 * ftrace_atboot - whether to start fast tracing at boot.
 * ftrace_nent - size of the per-CPU event ring buffer.
 */
int ftrace_atboot = 0;
int ftrace_nent = FTRACE_NENT;
/*
 * Global Tracing State:
 *
 *            NOTREADY
 *                |
 *           ftrace_init()
 *                |
 *                v
 *      +-------->READY-------+
 *      |                     |
 *  ftrace_stop()       ftrace_start()
 *      |                     |
 *      +---(ENABLED|READY)<--+
 *
 * During boot, ftrace_init() is called and the state becomes
 * READY. If ftrace_atboot is set, ftrace_start() is called at
 * this time.
 *
 * If FTRACE_READY is set, then tracing can be enabled.
 * If FTRACE_ENABLED is set, tracing is enabled on the set of CPUs
 * which are currently FTRACE_READY.
 */
static int ftrace_state = 0;
/*
 * Per-CPU Tracing State:
 *
 *     +-----------------READY<--------------+
 *     |                  ^   |              |
 *     |                  |   |     ftrace_cpu_fini()
 *     |                  |   |              |
 *     |  ftrace_cpu_init()   |              |
 *     |                  |   v      ftrace_cpu_stop()
 *     |              NOTREADY               |
 *     |                  ^                  |
 *  ftrace_cpu_start()    |                  |
 *     |         ftrace_cpu_fini()           |
 *     |                  |                  |
 *     +----------->(ENABLED|READY)----------+
 */
/*
 * Trace context code does not take any locks. Each CPU has a circular trace
 * buffer with a head, a tail and a current pointer; every record in the
 * buffer has the same length. Before doing anything, trace context code
 * checks the per-CPU ENABLED bit. The trace buffer is allocated in non-trace
 * context, which sets this bit only after the buffer has been allocated and
 * set up, so trace context code cannot access the buffer until it is
 * completely initialized. The buffer is also freed in non-trace context, and
 * only after the corresponding CPU has been powered off, so no trace context
 * code can be running on it when that happens. We only need to make sure
 * that trace context code is not preempted off the CPU in the middle of
 * accessing the trace buffer, which is achieved simply by disabling
 * interrupts temporarily. This approach makes the fewest assumptions about
 * the state of the callers of the tracing functions.
 */
/*
 * A single global lock, ftrace_lock, protects assignments to all global and
 * per-CPU trace variables. It does not protect reads of those variables in
 * some cases.
 *
 * More specifically, it protects assignments to:
 *
 *   ftrace_state
 *   cpu[N]->cpu_ftrace.ftd_state
 *   cpu[N]->cpu_ftrace.ftd_first
 *   cpu[N]->cpu_ftrace.ftd_last
 *
 * Does _not_ protect reading of cpu[N]->cpu_ftrace.ftd_state
 * Does _not_ protect cpu[N]->cpu_ftrace.ftd_cur
 * Does _not_ protect reading of ftrace_state
 */
static kmutex_t ftrace_lock;
/*
 * Check whether a CPU is installed.
 */
#define	IS_CPU(i)	(cpu[i] != NULL)
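
/*
 * Mark a CPU as ready for tracing. The trace buffer itself is not allocated
 * here; that is deferred to ftrace_cpu_start(). Called with ftrace_lock held.
 */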
static void
ftrace_cpu_init(int cpuid)
{
	ftrace_data_t *ftd;

	/*
	 * This can be called with "cpu[cpuid]->cpu_flags & CPU_EXISTS"
	 * being false - e.g. when a CPU is DR'ed in.
	 */
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY)
		return;

	/*
	 * We don't allocate the buffers until the first time
	 * ftrace_cpu_start() is called, so that they're not
	 * allocated if ftrace is never enabled.
	 */
	ftd->ftd_state |= FTRACE_READY;
	ASSERT(!(ftd->ftd_state & FTRACE_ENABLED));
}
/*
 * Only called from cpu_unconfigure() (and cpu_configure() on error).
 * At this point, cpu[cpuid] is about to be freed and NULLed out,
 * so we'd better clean up after ourselves.
 */
static void
ftrace_cpu_fini(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT((cpu[cpuid]->cpu_flags & CPU_POWEROFF) != 0);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (!(ftd->ftd_state & FTRACE_READY))
		return;

	/*
	 * This CPU is powered off and no code can be executing on it. So
	 * we can simply finish our cleanup. There is no need for an xcall
	 * to make sure that this CPU is out of trace context.
	 *
	 * The cpu structure will be cleared soon. But, for the sake of
	 * debugging, clear our pointers and state.
	 */
	if (ftd->ftd_first != NULL) {
		kmem_free(ftd->ftd_first,
		    ftrace_nent * sizeof (ftrace_record_t));
	}
	bzero(ftd, sizeof (ftrace_data_t));
}
static void
ftrace_cpu_start(int cpuid)
{
	ftrace_data_t *ftd;

	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	ASSERT(ftrace_state & FTRACE_ENABLED);

	ftd = &cpu[cpuid]->cpu_ftrace;
	if (ftd->ftd_state & FTRACE_READY) {
		if (ftd->ftd_first == NULL) {
			ftrace_record_t *ptrs;

			mutex_exit(&ftrace_lock);
			ptrs = kmem_zalloc(ftrace_nent *
			    sizeof (ftrace_record_t), KM_SLEEP);
			mutex_enter(&ftrace_lock);
			if (ftd->ftd_first != NULL) {
				/*
				 * Someone else beat us to it. The winner will
				 * set up the pointers and the state.
				 */
				kmem_free(ptrs,
				    ftrace_nent * sizeof (ftrace_record_t));
				return;
			}

			ftd->ftd_first = ptrs;
			ftd->ftd_last = ptrs + (ftrace_nent - 1);
			ftd->ftd_cur = ptrs;
		}
		ftd->ftd_state |= FTRACE_ENABLED;
	}
}
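
/*
 * Disable tracing on this CPU. The buffer is left in place so that tracing
 * can later be re-enabled without reallocating it. Called with ftrace_lock
 * held.
 */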
static void
ftrace_cpu_stop(int cpuid)
{
	ASSERT(MUTEX_HELD(&ftrace_lock));
	ASSERT(IS_CPU(cpuid));
	cpu[cpuid]->cpu_ftrace.ftd_state &= ~(FTRACE_ENABLED);
}
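
/*
 * CPU setup hook, registered from ftrace_init() via register_cpu_setup_func().
 * Brings a CPU that is being configured (e.g. DR'ed in) into the tracing
 * framework and tears down the tracing state of a CPU being unconfigured.
 */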
/*ARGSUSED*/
static int
ftrace_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	if (!(ftrace_state & FTRACE_READY))
		return (0);

	switch (what) {
	case CPU_CONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_init(id);
		if (ftrace_state & FTRACE_ENABLED)
			ftrace_cpu_start(id);
		mutex_exit(&ftrace_lock);
		break;

	case CPU_UNCONFIG:
		mutex_enter(&ftrace_lock);
		ftrace_cpu_fini(id);
		mutex_exit(&ftrace_lock);
		break;

	default:
		break;
	}

	return (0);
}
void
ftrace_init(void)
{
	int i;

	ASSERT(!(ftrace_state & FTRACE_READY));
	mutex_init(&ftrace_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&ftrace_lock);
	for (i = 0; i < NCPU; i++) {
		if (IS_CPU(i)) {
			/* should have been kmem_zalloc()'ed */
			ASSERT(cpu[i]->cpu_ftrace.ftd_state == 0);
			ASSERT(cpu[i]->cpu_ftrace.ftd_first == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_last == NULL);
			ASSERT(cpu[i]->cpu_ftrace.ftd_cur == NULL);
		}
	}

	if (ftrace_nent < 1) {
		mutex_exit(&ftrace_lock);
		return;
	}

	for (i = 0; i < NCPU; i++)
		if (IS_CPU(i))
			ftrace_cpu_init(i);

	ftrace_state |= FTRACE_READY;
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(ftrace_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
	mutex_exit(&ftrace_lock);

	if (ftrace_atboot)
		(void) ftrace_start();
}
/*
 * Called from uadmin ioctl, or via mp_init_table[] during boot.
 */
int
ftrace_start(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		was_enabled = ((ftrace_state & FTRACE_ENABLED) != 0);
		ftrace_state |= FTRACE_ENABLED;
		for (i = 0; i < NCPU; i++)
			if (IS_CPU(i))
				ftrace_cpu_start(i);
		mutex_exit(&ftrace_lock);
	}

	return (was_enabled);
}
/*
 * Called from uadmin ioctl, to stop tracing.
 */
int
ftrace_stop(void)
{
	int i, was_enabled = 0;

	if (ftrace_state & FTRACE_READY) {
		mutex_enter(&ftrace_lock);
		if (ftrace_state & FTRACE_ENABLED) {
			was_enabled = 1;
			for (i = 0; i < NCPU; i++)
				if (IS_CPU(i))
					ftrace_cpu_stop(i);
			ftrace_state &= ~(FTRACE_ENABLED);
		}
		mutex_exit(&ftrace_lock);
	}
	return (was_enabled);
}
/*
 * The ftrace_X() functions below are called from trace context. All callers
 * of ftrace_X() test FTRACE_ENABLED first. Although this is not very
 * accurate, it keeps the overhead very low when tracing is not enabled.
 *
 * gethrtime_unscaled() appears to be safe to call in trace context. As an
 * added precaution, we call it before we disable interrupts on this CPU.
 */
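
/*
 * Illustrative sketch only: a caller-side wrapper of roughly the following
 * shape keeps the tracing-disabled case down to a single flag test. The
 * macro name and the way the caller PC is passed here are placeholders, and
 * it is written against the file-local ftrace_state; see <sys/ftrace.h> for
 * the real caller-side interface.
 */
#define	EXAMPLE_FTRACE_1(fmt, d1)					\
	do {								\
		if (ftrace_state & FTRACE_ENABLED)			\
			ftrace_1((fmt), (ulong_t)(d1), (caddr_t)0);	\
	} while (0)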
void
ftrace_0(char *str, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
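
/*
 * ftrace_1(), ftrace_2(), ftrace_3() and ftrace_3_notick() below are
 * identical to ftrace_0() except for the number of data words recorded
 * (and, for the _notick variant, the omission of the timestamp).
 */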
void
ftrace_1(char *str, ulong_t arg1, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
void
ftrace_2(char *str, ulong_t arg1, ulong_t arg2, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
void
ftrace_3(char *str, ulong_t arg1, ulong_t arg2, ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;
	hrtime_t timestamp;

	timestamp = gethrtime_unscaled();

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = timestamp;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}
void
ftrace_3_notick(char *str, ulong_t arg1, ulong_t arg2,
    ulong_t arg3, caddr_t caller)
{
	ftrace_record_t *r;
	struct cpu *cp;
	ftrace_data_t *ftd;
	ftrace_icookie_t cookie;

	cookie = ftrace_interrupt_disable();

	cp = CPU;
	ftd = &cp->cpu_ftrace;

	if (!(ftd->ftd_state & FTRACE_ENABLED)) {
		ftrace_interrupt_enable(cookie);
		return;
	}

	r = ftd->ftd_cur;
	r->ftr_event = str;
	r->ftr_thread = curthread;
	r->ftr_tick = 0;
	r->ftr_caller = caller;
	r->ftr_data1 = arg1;
	r->ftr_data2 = arg2;
	r->ftr_data3 = arg3;

	if (r++ == ftd->ftd_last)
		r = ftd->ftd_first;
	ftd->ftd_cur = r;

	ftrace_interrupt_enable(cookie);
}