6684208 mirror mounted mountpoints don't want to umount when idle
[illumos-gate.git] / usr / src / uts / i86pc / io / pwrnow.c
blob9ef54d1e1c28879dcae121d191608ff2966c0642
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"
28 #include <sys/x86_archext.h>
29 #include <sys/machsystm.h>
30 #include <sys/x_call.h>
31 #include <sys/acpi/acpi.h>
32 #include <sys/acpica.h>
33 #include <sys/pwrnow.h>
34 #include <sys/cpu_acpi.h>
35 #include <sys/cpupm.h>
36 #include <sys/dtrace.h>
37 #include <sys/sdt.h>
/*
 * Error returns
 */
42 #define PWRNOW_RET_SUCCESS 0x00
43 #define PWRNOW_RET_NO_PM 0x01
44 #define PWRNOW_RET_UNSUP_STATE 0x02
45 #define PWRNOW_RET_TRANS_INCOMPLETE 0x03
47 #define PWRNOW_LATENCY_WAIT 10
/*
 * MSR registers for changing and reading processor power state.
 */
52 #define PWRNOW_PERF_CTL_MSR 0xC0010062
53 #define PWRNOW_PERF_STATUS_MSR 0xC0010063
55 #define AMD_CPUID_PSTATE_HARDWARE (1<<7)
56 #define AMD_CPUID_TSC_CONSTANT (1<<8)
/*
 * Debugging support
 */
61 #ifdef DEBUG
62 volatile int pwrnow_debug = 0;
63 #define PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
64 #else
65 #define PWRNOW_DEBUG(arglist)
66 #endif
/*
 * Per-CPU driver-private PowerNow! state, hung off
 * cpudrv_devstate_t's module_state by pwrnow_init().
 */
typedef struct pwrnow_state {
	uint32_t pn_state;	/* last P-state index successfully entered */
} pwrnow_state_t;
73 * Read the status register.
75 static int
76 read_status(cpu_acpi_handle_t handle, uint32_t *stat)
78 cpu_acpi_pct_t *pct_stat;
79 uint64_t reg;
80 int ret = 0;
82 pct_stat = CPU_ACPI_PCT_STATUS(handle);
84 switch (pct_stat->pc_addrspace_id) {
85 case ACPI_ADR_SPACE_FIXED_HARDWARE:
86 reg = rdmsr(PWRNOW_PERF_STATUS_MSR);
87 *stat = reg & 0xFFFFFFFF;
88 ret = 0;
89 break;
91 default:
92 DTRACE_PROBE1(pwrnow_status_unsupported_type, uint8_t,
93 pct_stat->pc_addrspace_id);
94 return (-1);
97 DTRACE_PROBE1(pwrnow_status_read, uint32_t, *stat);
98 DTRACE_PROBE1(pwrnow_status_read_err, int, ret);
100 return (ret);
104 * Write the ctrl register.
106 static int
107 write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
109 cpu_acpi_pct_t *pct_ctrl;
110 uint64_t reg;
111 int ret = 0;
113 pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
115 switch (pct_ctrl->pc_addrspace_id) {
116 case ACPI_ADR_SPACE_FIXED_HARDWARE:
117 reg = ctrl;
118 wrmsr(PWRNOW_PERF_CTL_MSR, reg);
119 ret = 0;
120 break;
122 default:
123 DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
124 pct_ctrl->pc_addrspace_id);
125 return (-1);
128 DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
129 DTRACE_PROBE1(pwrnow_ctrl_write_err, int, ret);
131 return (ret);
135 * Transition the current processor to the requested state.
137 void
138 pwrnow_pstate_transition(int *ret, cpudrv_devstate_t *cpudsp,
139 uint32_t req_state)
141 pwrnow_state_t *pwrnow_state = cpudsp->module_state;
142 cpu_acpi_handle_t handle = cpudsp->acpi_handle;
143 cpu_acpi_pstate_t *req_pstate;
144 uint32_t ctrl;
145 uint32_t stat;
146 cpu_t *cp;
147 int i;
149 req_pstate = CPU_ACPI_PSTATE(handle, req_state);
150 DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
151 CPU_ACPI_FREQ(req_pstate));
154 * Initiate the processor p-state change.
156 ctrl = CPU_ACPI_CTRL(req_pstate);
157 if (write_ctrl(handle, ctrl) != 0) {
158 *ret = PWRNOW_RET_UNSUP_STATE;
159 return;
162 /* Wait until switch is complete, but bound the loop just in case. */
163 for (i = CPU_ACPI_TRANSLAT(req_pstate) * 2; i >= 0;
164 i -= PWRNOW_LATENCY_WAIT) {
165 if (read_status(handle, &stat) == 0 &&
166 CPU_ACPI_STAT(req_pstate) == stat)
167 break;
168 drv_usecwait(PWRNOW_LATENCY_WAIT);
171 if (CPU_ACPI_STAT(req_pstate) != stat) {
172 DTRACE_PROBE(pwrnow_transition_incomplete);
173 *ret = PWRNOW_RET_TRANS_INCOMPLETE;
174 return;
177 pwrnow_state->pn_state = req_state;
178 cp = cpu[CPU->cpu_id];
179 cp->cpu_curr_clock = ((uint64_t)
180 CPU_ACPI_FREQ(req_pstate) * 1000000);
182 *ret = PWRNOW_RET_SUCCESS;
186 pwrnow_power(cpudrv_devstate_t *cpudsp, uint32_t req_state)
188 cpuset_t cpus;
189 int ret;
191 CPUSET_ONLY(cpus, cpudsp->cpu_id);
193 kpreempt_disable();
194 xc_call((xc_arg_t)&ret, (xc_arg_t)cpudsp, (xc_arg_t)req_state,
195 X_CALL_HIPRI, cpus, (xc_func_t)pwrnow_pstate_transition);
196 kpreempt_enable();
198 return (ret);
202 * Validate that this processor supports PowerNow! and if so,
203 * get the P-state data from ACPI and cache it.
206 pwrnow_init(cpudrv_devstate_t *cpudsp)
208 pwrnow_state_t *pwrnow_state;
209 cpu_acpi_handle_t handle;
210 cpu_acpi_pct_t *pct_stat;
211 struct cpuid_regs cpu_regs;
212 cpu_t *cp;
213 int domain;
215 PWRNOW_DEBUG(("pwrnow_init: instance %d\n",
216 ddi_get_instance(cpudsp->dip)));
218 /* AMD w/ CPUID support and rdmsr/wrmsr? */
219 if (x86_vendor != X86_VENDOR_AMD ||
220 !(x86_feature & X86_CPUID) ||
221 !(x86_feature & X86_MSR)) {
222 PWRNOW_DEBUG(("Either not AMD or feature not supported.\n"));
223 return (PWRNOW_RET_NO_PM);
227 * Get the Advanced Power Management Information.
229 cpu_regs.cp_eax = 0x80000007;
230 (void) __cpuid_insn(&cpu_regs);
233 * We currently only support CPU power management of
234 * processors that are P-state TSC invariant.
236 if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
237 PWRNOW_DEBUG(("No support for CPUs that are not P-state "
238 "TSC invariant.\n"));
239 return (PWRNOW_RET_NO_PM);
243 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
244 * single MSR write to change speed).
246 if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
247 PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
248 return (PWRNOW_RET_NO_PM);
252 * PowerNow! requires ACPI support. Get a handle
253 * to the correct processor object for this dip.
255 handle = cpudsp->acpi_handle = cpu_acpi_init(cpudsp->dip);
256 if (handle == NULL) {
257 cmn_err(CE_WARN, "!pwrnow_init: instance %d: "
258 "unable to get ACPI handle",
259 ddi_get_instance(cpudsp->dip));
260 cmn_err(CE_NOTE, "!CPU power management will not function.");
261 return (PWRNOW_RET_NO_PM);
264 if (cpu_acpi_cache_data(handle) != 0) {
265 PWRNOW_DEBUG(("Failed to cache ACPI data\n"));
266 cpu_acpi_fini(handle);
267 return (PWRNOW_RET_NO_PM);
270 pct_stat = CPU_ACPI_PCT_STATUS(handle);
271 switch (pct_stat->pc_addrspace_id) {
272 case ACPI_ADR_SPACE_FIXED_HARDWARE:
273 PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
274 break;
275 default:
276 cmn_err(CE_WARN, "!_PCT configured for unsupported "
277 "addrspace = %d.", pct_stat->pc_addrspace_id);
278 cmn_err(CE_NOTE, "!CPU power management will not function.");
279 cpu_acpi_fini(handle);
280 return (PWRNOW_RET_NO_PM);
283 if (CPU_ACPI_IS_OBJ_CACHED(handle, CPU_ACPI_PSD_CACHED))
284 domain = CPU_ACPI_PSD(handle).pd_domain;
285 else {
286 cp = cpu[CPU->cpu_id];
287 domain = cpuid_get_chipid(cp);
289 cpupm_add_cpu2dependency(cpudsp->dip, domain);
291 pwrnow_state = kmem_zalloc(sizeof (pwrnow_state_t), KM_SLEEP);
292 pwrnow_state->pn_state = NULL;
293 cpudsp->module_state = pwrnow_state;
295 PWRNOW_DEBUG(("Instance %d succeeded.\n",
296 ddi_get_instance(cpudsp->dip)));
297 return (PWRNOW_RET_SUCCESS);
301 * Free resources allocated by pwrnow_init().
303 void
304 pwrnow_fini(cpudrv_devstate_t *cpudsp)
306 cpu_acpi_fini(cpudsp->acpi_handle);
307 kmem_free(cpudsp->module_state, sizeof (pwrnow_state_t));