preprocessor cleanup: __xpv
[unleashed.git] / arch / x86 / kernel / platform / i86pc / io / cbe.c
blob284557ea223b6cf4320b6308fbf680e409c11260
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
27 #include <sys/systm.h>
28 #include <sys/cyclic.h>
29 #include <sys/cyclic_impl.h>
30 #include <sys/spl.h>
31 #include <sys/x_call.h>
32 #include <sys/kmem.h>
33 #include <sys/machsystm.h>
34 #include <sys/smp_impldefs.h>
35 #include <sys/psm_types.h>
36 #include <sys/psm.h>
37 #include <sys/atomic.h>
38 #include <sys/clock.h>
39 #include <sys/x86_archext.h>
40 #include <sys/ddi_impldefs.h>
41 #include <sys/ddi_intr.h>
42 #include <sys/avintr.h>
43 #include <sys/note.h>
45 static int cbe_vector;
46 static int cbe_ticks = 0;
49 * cbe_xcall_lock is used to protect the xcall globals since the cyclic
50 * reprogramming API does not use cpu_lock.
52 static kmutex_t cbe_xcall_lock;
53 static cyc_func_t volatile cbe_xcall_func;
54 static cpu_t *volatile cbe_xcall_cpu;
55 static void *cbe_xcall_farg;
56 static cpuset_t cbe_enabled;
58 static ddi_softint_hdl_impl_t cbe_low_hdl =
59 {NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL};
60 static ddi_softint_hdl_impl_t cbe_clock_hdl =
61 {NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL};
63 cyclic_id_t cbe_hres_cyclic;
64 int cbe_psm_timer_mode = TIMER_ONESHOT;
65 static hrtime_t cbe_timer_resolution;
67 extern int tsc_gethrtime_enable;
69 void cbe_hres_tick(void);
71 int
72 cbe_softclock(void)
74 cyclic_softint(CPU, CY_LOCK_LEVEL);
75 return (1);
78 int
79 cbe_low_level(void)
81 cpu_t *cpu = CPU;
83 cyclic_softint(cpu, CY_LOW_LEVEL);
84 return (1);
88 * We can be in cbe_fire() either due to a cyclic-induced cross call, or due
89 * to the timer firing at level-14. Because cyclic_fire() can tolerate
90 * spurious calls, it would not matter if we called cyclic_fire() in both
91 * cases.
93 int
94 cbe_fire(void)
96 cpu_t *cpu = CPU;
97 processorid_t me = cpu->cpu_id, i;
98 int cross_call = (cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
100 cyclic_fire(cpu);
102 if (cbe_psm_timer_mode != TIMER_ONESHOT && me == 0 && !cross_call) {
103 for (i = 1; i < NCPU; i++) {
104 if (CPU_IN_SET(cbe_enabled, i)) {
105 send_dirint(i, CBE_HIGH_PIL);
110 if (cross_call) {
111 ASSERT(cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
112 (*cbe_xcall_func)(cbe_xcall_farg);
113 cbe_xcall_func = NULL;
114 cbe_xcall_cpu = NULL;
117 return (1);
120 /*ARGSUSED*/
121 void
122 cbe_softint(void *arg, cyc_level_t level)
124 switch (level) {
125 case CY_LOW_LEVEL:
126 (*setsoftint)(CBE_LOW_PIL, cbe_low_hdl.ih_pending);
127 break;
128 case CY_LOCK_LEVEL:
129 (*setsoftint)(CBE_LOCK_PIL, cbe_clock_hdl.ih_pending);
130 break;
131 default:
132 panic("cbe_softint: unexpected soft level %d", level);
136 /*ARGSUSED*/
137 void
138 cbe_reprogram(void *arg, hrtime_t time)
140 if (cbe_psm_timer_mode == TIMER_ONESHOT)
141 (*psm_timer_reprogram)(time);
144 /*ARGSUSED*/
145 cyc_cookie_t
146 cbe_set_level(void *arg, cyc_level_t level)
148 int ipl;
150 switch (level) {
151 case CY_LOW_LEVEL:
152 ipl = CBE_LOW_PIL;
153 break;
154 case CY_LOCK_LEVEL:
155 ipl = CBE_LOCK_PIL;
156 break;
157 case CY_HIGH_LEVEL:
158 ipl = CBE_HIGH_PIL;
159 break;
160 default:
161 panic("cbe_set_level: unexpected level %d", level);
164 return (splr(ipltospl(ipl)));
167 /*ARGSUSED*/
168 void
169 cbe_restore_level(void *arg, cyc_cookie_t cookie)
171 splx(cookie);
174 /*ARGSUSED*/
175 void
176 cbe_xcall(void *arg, cpu_t *dest, cyc_func_t func, void *farg)
178 kpreempt_disable();
180 if (dest == CPU) {
181 (*func)(farg);
182 kpreempt_enable();
183 return;
186 mutex_enter(&cbe_xcall_lock);
188 ASSERT(cbe_xcall_func == NULL);
190 cbe_xcall_farg = farg;
191 membar_producer();
192 cbe_xcall_cpu = dest;
193 cbe_xcall_func = func;
195 send_dirint(dest->cpu_id, CBE_HIGH_PIL);
197 while (cbe_xcall_func != NULL || cbe_xcall_cpu != NULL)
198 continue;
200 mutex_exit(&cbe_xcall_lock);
202 kpreempt_enable();
205 void *
206 cbe_configure(cpu_t *cpu)
208 return (cpu);
211 void
212 cbe_unconfigure(void *arg)
214 _NOTE(ARGUNUSED(arg));
215 ASSERT(!CPU_IN_SET(cbe_enabled, ((cpu_t *)arg)->cpu_id));
/*
 * declarations needed for time adjustment
 */
extern void tsc_suspend(void);
extern void tsc_resume(void);

/*
 * Call the resume function in the cyclic, instead of inline in the
 * resume path.
 */
extern int tsc_resume_in_cyclic;
229 /*ARGSUSED*/
230 static void
231 cbe_suspend(cyb_arg_t arg)
234 * This is an x86 backend, so let the tsc_suspend
235 * that is specific to x86 platforms do the work.
237 tsc_suspend();
240 /*ARGSUSED*/
241 static void
242 cbe_resume(cyb_arg_t arg)
244 if (tsc_resume_in_cyclic) {
245 tsc_resume();
249 void
250 cbe_enable(void *arg)
252 processorid_t me = ((cpu_t *)arg)->cpu_id;
254 /* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
255 if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
256 return;
259 * Added (me == 0) to the ASSERT because the timer isn't
260 * disabled on CPU 0, and cbe_enable is called when we resume.
262 ASSERT((me == 0) || !CPU_IN_SET(cbe_enabled, me));
263 CPUSET_ADD(cbe_enabled, me);
264 if (cbe_psm_timer_mode == TIMER_ONESHOT)
265 (*psm_timer_enable)();
268 void
269 cbe_disable(void *arg)
271 processorid_t me = ((cpu_t *)arg)->cpu_id;
273 /* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
274 if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
275 return;
277 ASSERT(CPU_IN_SET(cbe_enabled, me));
278 CPUSET_DEL(cbe_enabled, me);
279 if (cbe_psm_timer_mode == TIMER_ONESHOT)
280 (*psm_timer_disable)();
284 * Unbound cyclic, called once per tick (every nsec_per_tick ns).
286 void
287 cbe_hres_tick(void)
289 int s;
291 dtrace_hres_tick();
294 * Because hres_tick effectively locks hres_lock, we must be at the
295 * same PIL as that used for CLOCK_LOCK.
297 s = splr(ipltospl(XC_HI_PIL));
298 hres_tick();
299 splx(s);
301 if ((cbe_ticks % hz) == 0)
302 (*hrtime_tick)();
304 cbe_ticks++;
308 void
309 cbe_init_pre(void)
311 cbe_vector = (*psm_get_clockirq)(CBE_HIGH_PIL);
313 CPUSET_ZERO(cbe_enabled);
315 cbe_timer_resolution = (*clkinitf)(TIMER_ONESHOT, &cbe_psm_timer_mode);
318 void
319 cbe_init(void)
321 cyc_backend_t cbe = {
322 cbe_configure, /* cyb_configure */
323 cbe_unconfigure, /* cyb_unconfigure */
324 cbe_enable, /* cyb_enable */
325 cbe_disable, /* cyb_disable */
326 cbe_reprogram, /* cyb_reprogram */
327 cbe_softint, /* cyb_softint */
328 cbe_set_level, /* cyb_set_level */
329 cbe_restore_level, /* cyb_restore_level */
330 cbe_xcall, /* cyb_xcall */
331 cbe_suspend, /* cyb_suspend */
332 cbe_resume /* cyb_resume */
334 cyc_handler_t hdlr;
335 cyc_time_t when;
337 mutex_init(&cbe_xcall_lock, NULL, MUTEX_DEFAULT, NULL);
339 mutex_enter(&cpu_lock);
340 cyclic_init(&cbe, cbe_timer_resolution);
341 mutex_exit(&cpu_lock);
343 (void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
344 "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);
346 if (psm_get_ipivect != NULL) {
347 (void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
348 "cbe_fire_slave",
349 (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
350 0, NULL, NULL, NULL);
353 (void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
354 (avfunc)cbe_softclock, "softclock", NULL, NULL);
356 (void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
357 (avfunc)cbe_low_level, "low level", NULL, NULL);
359 mutex_enter(&cpu_lock);
361 hdlr.cyh_level = CY_HIGH_LEVEL;
362 hdlr.cyh_func = (cyc_func_t)cbe_hres_tick;
363 hdlr.cyh_arg = NULL;
365 when.cyt_when = 0;
366 when.cyt_interval = nsec_per_tick;
368 cbe_hres_cyclic = cyclic_add(&hdlr, &when);
370 if (psm_post_cyclic_setup != NULL)
371 (*psm_post_cyclic_setup)(NULL);
373 mutex_exit(&cpu_lock);