/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/privregs.h>
#include <sys/vmem.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/rwlock.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/kdi_machimpl.h>

/*
 * We are called with a pointer to a cell-sized argument array.
 * The service name (the first element of the argument array) is
 * the name of the callback being invoked.  When called, we are
 * running on the firmware's trap table as a trusted subroutine
 * of the firmware.
 *
 * We define entry points to allow callback handlers to be dynamically
 * added and removed, to support obpsym, which is a separate module
 * that can be dynamically loaded and unloaded and registers its
 * callback handlers dynamically.
 *
 * Note: The actual callback handler we register is the assembly-language
 * glue, callback_handler, which takes care of switching from a 64-bit
 * stack and environment to a 32-bit stack and environment, and back
 * again if the callback handler returns.  callback_handler calls
 * vx_handler to process the callback.
 */
static kmutex_t	vx_cmd_lock;	/* protect vx_cmd table */

#define	VX_CMD_MAX	10
#define	ENDADDR(a)	&a[sizeof (a) / sizeof (a[0])]
#define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))

static struct vx_cmd {
	char	*service;	/* Service name */
	int	take_tba;	/* If non-zero we take over the tba */
	void	(*func)(cell_t *argument_array);
} vx_cmd[VX_CMD_MAX+1];

void
init_vx_handler(void)
{
	extern int callback_handler(cell_t *arg_array);

	/*
	 * initialize the lock protecting additions and deletions from
	 * the vx_cmd table.  At callback time we don't need to grab
	 * this lock.  Callback handlers do not need to modify the
	 * callback handler table.
	 */
	mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Tell OBP about our callback handler.
	 */
	(void) prom_set_callback((void *)callback_handler);
}

/*
 * Add a kernel callback handler to the kernel's list.
 * The table is static, so if you add a callback handler, increase
 * the value of VX_CMD_MAX.  Find the first empty slot and use it.
 */
void
add_vx_handler(char *name, int flag, void (*func)(cell_t *))
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL) {
			vp->service = name;
			vp->take_tba = flag;
			vp->func = func;
			mutex_exit(&vx_cmd_lock);
			return;
		}
	}
	mutex_exit(&vx_cmd_lock);

#ifdef	DEBUG

	/*
	 * There must be enough entries to handle all callback entries.
	 * Increase VX_CMD_MAX if this happens.  This shouldn't happen.
	 */
	cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
	/* NOTREACHED */

#else	/* DEBUG */

	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
	    name);

#endif	/* DEBUG */

}

/*
 * Remove a vx_handler function -- find the name string in the table,
 * and clear it.
 */
void
remove_vx_handler(char *name)
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		vp->service = 0;
		vp->take_tba = 0;
		vp->func = 0;
		mutex_exit(&vx_cmd_lock);
		return;
	}
	mutex_exit(&vx_cmd_lock);
	cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
}

int
vx_handler(cell_t *argument_array)
{
	char *name;
	struct vx_cmd *vp;
	void *old_tba;

	name = p1275_cell2ptr(*argument_array);

	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == (char *)0)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		if (vp->take_tba != 0) {
			reestablish_curthread();
			if (tba_taken_over != 0)
				old_tba = set_tba((void *)&trap_table);
		}
		vp->func(argument_array);
		if ((vp->take_tba != 0) && (tba_taken_over != 0))
			(void) set_tba(old_tba);
		return (0);	/* Service name was known */
	}

	return (-1);	/* Service name unknown */
}
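
/*
 * A minimal usage sketch (not part of this file; the service name and
 * handler below are hypothetical): a dynamically loaded module such as
 * obpsym registers its handler at load time and removes it at unload time.
 *
 *	static void
 *	example_cb(cell_t *argument_array)
 *	{
 *		... decode the arguments, write results back into the array ...
 *	}
 *
 *	add_vx_handler("example-service", 1, example_cb);
 *	...
 *	remove_vx_handler("example-service");
 */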

/*
 * PROM Locking Primitives
 *
 * These routines are called immediately before and immediately after calling
 * into the firmware.  The firmware is single-threaded and assumes that the
 * kernel will implement locking to prevent simultaneous service calls.  In
 * addition, some service calls (particularly character rendering) can be
 * slow, so we would like to sleep if we cannot acquire the lock to allow the
 * caller's CPU to continue to perform useful work in the interim.  Service
 * routines may also be called early in boot as part of slave CPU startup
 * when mutexes and cvs are not yet available (i.e. they are still running on
 * the prom's TLB handlers and cannot touch curthread).  Therefore, these
 * routines must reduce to a simple compare-and-swap spin lock when necessary.
 * Finally, kernel code may wish to acquire the firmware lock before executing
 * a block of code that includes service calls, so we also allow the firmware
 * lock to be acquired recursively by the owning CPU after disabling
 * preemption.
 *
 * To meet these constraints, the lock itself is implemented as a compare-and-
 * swap spin lock on the global prom_cpu pointer.  We implement recursion by
 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
 * we disable preemption before acquiring the lock and leave it disabled once
 * the lock is held.  The kern_postprom() routine then enables preemption if
 * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
 * an adult and the lock is held by another adult CPU, we can safely sleep
 * until the lock is released.  To do so, we acquire the adaptive prom_mutex
 * and then sleep on prom_cv.  Therefore, service routines must not be called
 * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
 * attempted on an adult CPU, we must also verify that curthread matches the
 * saved prom_thread (the original owner) to ensure that low-level interrupt
 * threads do not step on other threads running on the same CPU.
 */

static cpu_t *volatile prom_cpu;
static kthread_t *volatile prom_thread;
static uint32_t prom_holdcnt;
static kmutex_t prom_mutex;
static kcondvar_t prom_cv;

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;

void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready
		 * bit.  It doesn't matter if we are preempted here because
		 * we are only trying to determine if we are in the *set* of
		 * mutex ready CPUs.  We cannot disable preemption until we
		 * confirm that we are running on a CPU in this set, since a
		 * call to kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}

/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}
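
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * kernel code that issues several prom_*() service calls and wants to hold
 * the firmware lock across the whole sequence can bracket it explicitly;
 * the locking performed on its behalf for each individual call then simply
 * nests via the recursive hold count.
 *
 *	kern_preprom();
 *	prom_printf("phase one\n");
 *	prom_printf("phase two\n");
 *	kern_postprom();
 */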

/*
 * If the frame buffer device is busy, briefly capture the other CPUs so that
 * another CPU executing code to manipulate the device does not execute at the
 * same time we are rendering characters.  Refer to the comments and code in
 * common/os/console.c for more information on these callbacks.
 *
 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
 * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
 * them are holding the PROM lock before we idle them and then call into the
 * PROM routines that render characters to the frame buffer.
 */
int
console_enter(int busy)
{
	int s = 0;

	if (busy && panicstr == NULL) {
		kern_preprom();
		s = splhi();
		idle_other_cpus();
	}

	return (s);
}

void
console_exit(int busy, int spl)
{
	if (busy && panicstr == NULL) {
		resume_other_cpus();
		splx(spl);
		kern_postprom();
	}
}

/*
 * This routine is a special form of pause_cpus().  It ensures that
 * prom functions are callable while the cpus are paused.
 */
void
promsafe_pause_cpus(void)
{
	pause_cpus(NULL, NULL);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		start_cpus();
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		pause_cpus(NULL, NULL);
	}

	/* At this point all cpus are paused and none are in the prom */
}
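
/*
 * A minimal usage sketch (hypothetical caller): an operation that must
 * quiesce the other CPUs but may still need PROM services afterwards pairs
 * this routine with start_cpus().
 *
 *	promsafe_pause_cpus();
 *	... perform the work; prom_*() calls remain safe here ...
 *	start_cpus();
 */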

/*
 * This routine is a special form of xc_attention().  It ensures that
 * prom functions are callable while the cpus are at attention.
 */
void
promsafe_xc_attention(cpuset_t cpuset)
{
	xc_attention(cpuset);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		xc_dismissed(cpuset);
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		xc_attention(cpuset);
	}

	/* At this point all cpus are paused and none are in the prom */
}

#if defined(PROM_32BIT_ADDRS)

#include <sys/promimpl.h>
#include <vm/seg_kmem.h>
#include <sys/kmem.h>
#include <sys/bootconf.h>

/*
 * These routines are only used to work around "poor feature interaction"
 * in OBP.  See bug 4115680 for details.
 *
 * Many of the promif routines need to allocate temporary buffers
 * with 32-bit addresses to pass in/out of the CIF.  The lifetime
 * of the buffers is extremely short; they are allocated and freed
 * around the CIF call.  We use vmem_alloc() to cache 32-bit memory.
 *
 * Note the code in promplat_free() to prevent exhausting the 32-bit
 * heap during boot.
 */
static void *promplat_last_free = NULL;
static size_t promplat_last_size;
static vmem_t *promplat_arena;
static kmutex_t promplat_lock;	/* protect arena, last_free, and last_size */

void *
promplat_alloc(size_t size)
{

	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}

/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot, otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
	void *p2 = NULL;
	size_t s2;

	/*
	 * If VM is initialized, clean up any delayed free().
	 */
	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
		mutex_enter(&promplat_lock);
		p2 = promplat_last_free;
		s2 = promplat_last_size;
		promplat_last_free = NULL;
		promplat_last_size = 0;
		mutex_exit(&promplat_lock);
		if (p2 != NULL) {
			vmem_free(promplat_arena, p2, s2);
			p2 = NULL;
		}
	}

	/*
	 * Do the free if VM is initialized or it's a large allocation.
	 */
	if (kvseg.s_base != 0 || size >= PAGESIZE) {
		vmem_free(promplat_arena, p, size);
		return;
	}

	/*
	 * Otherwise, do the last free request and delay this one.
	 */
	mutex_enter(&promplat_lock);
	if (promplat_last_free != NULL) {
		p2 = promplat_last_free;
		s2 = promplat_last_size;
	}
	promplat_last_free = p;
	promplat_last_size = size;
	mutex_exit(&promplat_lock);

	if (p2 != NULL)
		vmem_free(promplat_arena, p2, s2);
}

void
promplat_bcopy(const void *src, void *dst, size_t count)
{
	bcopy(src, dst, count);
}
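
/*
 * A minimal usage sketch (hypothetical promif caller): a routine that must
 * hand the firmware a 32-bit addressable copy of a kernel buffer bounces it
 * through this arena around the CIF call.
 *
 *	char *obuf = promplat_alloc(len);
 *
 *	promplat_bcopy(kbuf, obuf, len);
 *	... pass obuf to the CIF ...
 *	promplat_bcopy(obuf, kbuf, len);
 *	promplat_free(obuf, len);
 */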

#endif	/* PROM_32BIT_ADDRS */

static prom_generation_cookie_t prom_tree_gen;
static krwlock_t prom_tree_lock;

int
prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
    prom_generation_cookie_t *ckp)
{
	int chg, rv;

	rw_enter(&prom_tree_lock, RW_READER);
	/*
	 * If the tree has changed since the caller last accessed it,
	 * pass 1 as the second argument to the callback function,
	 * otherwise 0.
	 */
	if (ckp != NULL && *ckp != prom_tree_gen) {
		*ckp = prom_tree_gen;
		chg = 1;
	} else
		chg = 0;
	rv = callback(arg, chg);
	rw_exit(&prom_tree_lock);
	return (rv);
}

int
prom_tree_update(int (*callback)(void *arg), void *arg)
{
	int rv;

	rw_enter(&prom_tree_lock, RW_WRITER);
	prom_tree_gen++;
	rv = callback(arg);
	rw_exit(&prom_tree_lock);
	return (rv);
}
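
/*
 * A minimal usage sketch (hypothetical callback): a caller that caches data
 * derived from the device tree keeps a generation cookie and lets the
 * callback decide whether its cache must be rebuilt.
 *
 *	static prom_generation_cookie_t my_gen;
 *
 *	static int
 *	my_walk(void *arg, int has_changed)
 *	{
 *		if (has_changed)
 *			... invalidate cached nodes ...
 *		... read the tree under the reader lock ...
 *		return (0);
 *	}
 *
 *	(void) prom_tree_access(my_walk, NULL, &my_gen);
 */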