/*
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */
#include <sys/param.h>
#include <sys/cpuhelper.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>
#include <sys/microtime_pcpu.h>
#include <sys/cpu_topology.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include "acpi_cpu_cstate.h"
/*
 * Support for ACPI Processor devices, including C[1-3+] sleep states.
 */
/* Hooks for the ACPICA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

#define MAX_CX_STATES	8
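/*
 * Upper bound on the Cx states tracked per CPU; _CST packages that
 * advertise more states are truncated (see acpi_cst_cx_probe_cst()).
 */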
struct acpi_cst_softc {
	device_t		cst_dev;
	struct acpi_cpu_softc	*cst_parent;
	ACPI_HANDLE		cst_handle;
	int			cst_cpuid;
	uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
	uint32_t		cst_p_blk;	/* ACPI P_BLK location */
	uint32_t		cst_p_blk_len;	/* P_BLK length (must be 6). */
	struct acpi_cst_cx	cst_cx_states[MAX_CX_STATES];
	int			cst_cx_count;	/* Number of valid Cx states. */
	int			cst_prev_sleep;	/* Last idle sleep duration. */
	int			cst_non_c3;	/* Index of lowest non-C3 state. */
	u_long			cst_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
	/* Values for sysctl. */
	int			cst_cx_lowest;	/* Current Cx lowest */
	int			cst_cx_lowest_req; /* Requested Cx lowest */
	char			cst_cx_supported[64];
};
#define ACPI_CST_FLAG_PROBING	0x1
#define ACPI_CST_FLAG_ATTACHED	0x2
/* Match C-states of other hyperthreads on the same core */
#define ACPI_CST_FLAG_MATCH_HT	0x4
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | \
				 PIIX4_BRLD_EN_IRQ | \
				 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)
/* Platform hardware resource information. */
static uint32_t		 acpi_cst_smi_cmd; /* Value to write to SMI_CMD. */
static uint8_t		 acpi_cst_ctrl;	/* Indicate we are _CST aware. */
int			 acpi_cst_quirks; /* Indicate any hardware bugs. */
static boolean_t	 acpi_cst_use_fadt;

static boolean_t	 acpi_cst_disable_idle;
					/* Disable entry to idle function */
static int		 acpi_cst_cx_count; /* Number of valid Cx states */

/* Values for sysctl. */
static int		 acpi_cst_cx_lowest; /* Current Cx lowest */
static int		 acpi_cst_cx_lowest_req; /* Requested Cx lowest */

static device_t		*acpi_cst_devices;
static int		 acpi_cst_ndevices;
static struct acpi_cst_softc **acpi_cst_softc;
static struct lwkt_serialize acpi_cst_slize = LWKT_SERIALIZE_INITIALIZER;
static int	acpi_cst_probe(device_t);
static int	acpi_cst_attach(device_t);
static int	acpi_cst_suspend(device_t);
static int	acpi_cst_resume(device_t);
static int	acpi_cst_shutdown(device_t);

static void	acpi_cst_notify(device_t);
static void	acpi_cst_postattach(void *);
static void	acpi_cst_idle(void);
static void	acpi_cst_copy(struct acpi_cst_softc *,
		    const struct acpi_cst_softc *);

static void	acpi_cst_cx_probe(struct acpi_cst_softc *);
static void	acpi_cst_cx_probe_fadt(struct acpi_cst_softc *);
static int	acpi_cst_cx_probe_cst(struct acpi_cst_softc *, int);
static int	acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *);

static void	acpi_cst_startup(struct acpi_cst_softc *);
static void	acpi_cst_support_list(struct acpi_cst_softc *);
static int	acpi_cst_set_lowest(struct acpi_cst_softc *, int);
static int	acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *, int);
static void	acpi_cst_non_c3(struct acpi_cst_softc *);
static void	acpi_cst_global_cx_count(void);
static int	acpi_cst_set_quirks(void);
static void	acpi_cst_c3_bm_rld(struct acpi_cst_softc *);
static void	acpi_cst_free_resource(struct acpi_cst_softc *, int);
static void	acpi_cst_c1_halt(void);

static int	acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);

static int	acpi_cst_cx_setup(struct acpi_cst_cx *cx);
static void	acpi_cst_c1_halt_enter(const struct acpi_cst_cx *);
static void	acpi_cst_cx_io_enter(const struct acpi_cst_cx *);

int		acpi_cst_force_bmarb;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmarb", &acpi_cst_force_bmarb);

int		acpi_cst_force_bmsts;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmsts", &acpi_cst_force_bmsts);
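/*
 * NOTE: the two tunables above are intentionally global (non-static);
 * they are not referenced in this file and are presumably consulted by
 * the machine-dependent C-state setup code (see acpi_cst_md_cx_setup()).
 */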
static device_method_t acpi_cst_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		acpi_cst_probe),
	DEVMETHOD(device_attach,	acpi_cst_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	acpi_cst_shutdown),
	DEVMETHOD(device_suspend,	acpi_cst_suspend),
	DEVMETHOD(device_resume,	acpi_cst_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_read_ivar,	bus_generic_read_ivar),
	DEVMETHOD(bus_get_resource_list, bus_generic_get_resource_list),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_alloc_resource,	bus_generic_rl_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	DEVMETHOD_END
};
static driver_t acpi_cst_driver = {
	"cpu_cst",
	acpi_cst_methods,
	sizeof(struct acpi_cst_softc),
	.gpri = KOBJ_GPRI_ACPI+2
};

static devclass_t acpi_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);
static int
acpi_cst_probe(device_t dev)
{
	int cpu_id;

	if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
		return (ENXIO);

	cpu_id = acpi_get_magic(dev);

	if (acpi_cst_softc == NULL) {
		acpi_cst_softc = kmalloc(sizeof(struct acpi_cst_softc *) *
		    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);
	}

	/*
	 * Check if we already probed this processor.  We scan the bus twice
	 * so it's possible we've already seen this one.
	 */
	if (acpi_cst_softc[cpu_id] != NULL) {
		device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
		return (ENXIO);
	}

	/* Mark this processor as in-use and save our derived id for attach. */
	acpi_cst_softc[cpu_id] = device_get_softc(dev);
	device_set_desc(dev, "ACPI CPU C-State");

	return (0);
}
static int
acpi_cst_attach(device_t dev)
{
	struct acpi_cst_softc *sc;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS status;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	sc = device_get_softc(dev);
	sc->cst_dev = dev;
	sc->cst_parent = device_get_softc(device_get_parent(dev));
	sc->cst_handle = acpi_get_handle(dev);
	sc->cst_cpuid = acpi_get_magic(dev);
	acpi_cst_softc[sc->cst_cpuid] = sc;
	acpi_cst_smi_cmd = AcpiGbl_FADT.SmiCommand;
	acpi_cst_ctrl = AcpiGbl_FADT.CstControl;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "attach failed to get Processor obj - %s\n",
		    AcpiFormatException(status));
		acpi_cst_softc[sc->cst_cpuid] = NULL;
		return (ENXIO);
	}

	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cst_p_blk = obj->Processor.PblkAddress;
	sc->cst_p_blk_len = obj->Processor.PblkLength;
	AcpiOsFree(obj);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "cpu_cst%d: P_BLK at %#x/%d\n",
	    device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len));

	/*
	 * If this is the first cpu we attach, create and initialize the generic
	 * resources that will be used by all acpi cpu devices.
	 */
	if (device_get_unit(dev) == 0) {
		/* Assume we won't be using FADT for Cx states by default */
		acpi_cst_use_fadt = FALSE;

		/* Queue post cpu-probing task handler */
		AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cst_postattach, NULL);
	}

	/* Probe for Cx state support. */
	acpi_cst_cx_probe(sc);

	sc->cst_flags |= ACPI_CST_FLAG_ATTACHED;

	return (0);
}
/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cst_suspend(device_t dev)
{
	int error;

	error = bus_generic_suspend(dev);
	if (error)
		return (error);
	acpi_cst_disable_idle = TRUE;

	return (0);
}

static int
acpi_cst_resume(device_t dev)
{
	acpi_cst_disable_idle = FALSE;

	return (bus_generic_resume(dev));
}
static int
acpi_cst_shutdown(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/* Allow children to shutdown first. */
	bus_generic_shutdown(dev);

	/*
	 * Disable any entry to the idle function.  There is a small race where
	 * an idle thread has passed this check but not gone to sleep.  This
	 * is ok since device_shutdown() does not free the softc, otherwise
	 * we'd have to be sure all threads were evicted before returning.
	 */
	acpi_cst_disable_idle = TRUE;

	return_VALUE(0);
}
static void
acpi_cst_cx_probe(struct acpi_cst_softc *sc)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/* Use initial sleep value of 1 sec. to start with lowest idle state. */
	sc->cst_prev_sleep = 1000000;
	sc->cst_cx_lowest = 0;
	sc->cst_cx_lowest_req = 0;

	/*
	 * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
	 * any, we'll revert to FADT/P_BLK Cx control method which will be
	 * handled by acpi_cst_postattach.  We need to defer to after having
	 * probed all the cpus in the system before probing for Cx states from
	 * FADT as we may already have found cpus with valid _CST packages.
	 */
	if (!acpi_cst_use_fadt && acpi_cst_cx_probe_cst(sc, 0) != 0) {
		/*
		 * We were unable to find a _CST package for this cpu or there
		 * was an error parsing it.  Switch back to generic mode.
		 */
		acpi_cst_use_fadt = TRUE;
		if (bootverbose)
			device_printf(sc->cst_dev, "switching to FADT Cx mode\n");
	}

	/*
	 * TODO: _CSD Package should be checked here.
	 */
}
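/*
 * Probe Cx states via FADT/P_BLK: C1 is always assumed present, while
 * C2/C3 are derived from the P_LVL2/P_LVL3 I/O ports (P_BLK + 4 and
 * P_BLK + 5) when the FADT-reported latencies are acceptable.
 */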
static void
acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc)
{
	struct acpi_cst_cx *cx_ptr;
	int error;

	/*
	 * Free all previously allocated resources.
	 *
	 * This is needed, since we could enter here because of another
	 * cpu's _CST probing failure.
	 */
	acpi_cst_free_resource(sc, 0);

	sc->cst_cx_count = 0;
	cx_ptr = sc->cst_cx_states;

	/* Use initial sleep value of 1 sec. to start with lowest idle state. */
	sc->cst_prev_sleep = 1000000;

	/* C1 has been required since just after ACPI 1.0 */
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE;
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
	cx_ptr->enter = acpi_cst_c1_halt_enter;
	error = acpi_cst_cx_setup(cx_ptr);
	if (error)
		panic("C1 FADT HALT setup failed: %d", error);
	cx_ptr++;
	sc->cst_cx_count++;

	/* C2(+) is not supported on MP system */
	if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
		return;

	/*
	 * The spec says P_BLK must be 6 bytes long.  However, some systems
	 * use it to indicate a fractional set of features present so we
	 * take 5 as C2.  Some may also have a value of 7 to indicate
	 * another C3 but most use _CST for this (as required) and having
	 * "only" C1-C3 is not a hardship.
	 */
	if (sc->cst_p_blk_len < 5)
		return;

	/* Validate and allocate resources for C2 (P_LVL2). */
	if (AcpiGbl_FADT.C2Latency <= 100) {
		cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		cx_ptr->gas.BitWidth = 8;
		cx_ptr->gas.Address = sc->cst_p_blk + 4;

		cx_ptr->rid = sc->cst_parent->cpu_next_rid;
		acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
		if (cx_ptr->res != NULL) {
			sc->cst_parent->cpu_next_rid++;
			cx_ptr->type = ACPI_STATE_C2;
			cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
			cx_ptr->enter = acpi_cst_cx_io_enter;
			cx_ptr->btag = rman_get_bustag(cx_ptr->res);
			cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
			error = acpi_cst_cx_setup(cx_ptr);
			if (error)
				panic("C2 FADT I/O setup failed: %d", error);
			cx_ptr++;
			sc->cst_cx_count++;
		}
	}
	if (sc->cst_p_blk_len < 6)
		return;

	/* Validate and allocate resources for C3 (P_LVL3). */
	if (AcpiGbl_FADT.C3Latency <= 1000 &&
	    !(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) {
		cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		cx_ptr->gas.BitWidth = 8;
		cx_ptr->gas.Address = sc->cst_p_blk + 5;

		cx_ptr->rid = sc->cst_parent->cpu_next_rid;
		acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
		if (cx_ptr->res != NULL) {
			sc->cst_parent->cpu_next_rid++;
			cx_ptr->type = ACPI_STATE_C3;
			cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
			cx_ptr->enter = acpi_cst_cx_io_enter;
			cx_ptr->btag = rman_get_bustag(cx_ptr->res);
			cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
			error = acpi_cst_cx_setup(cx_ptr);
			if (error)
				panic("C3 FADT I/O setup failed: %d", error);
			cx_ptr++;
			sc->cst_cx_count++;
		}
	}
}
static void
acpi_cst_copy(struct acpi_cst_softc *dst_sc,
    const struct acpi_cst_softc *src_sc)
{
	dst_sc->cst_non_c3 = src_sc->cst_non_c3;
	dst_sc->cst_cx_count = src_sc->cst_cx_count;
	memcpy(dst_sc->cst_cx_states, src_sc->cst_cx_states,
	    sizeof(dst_sc->cst_cx_states));
}
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe)
{
	struct acpi_cst_cx *cx_ptr;
	ACPI_STATUS status;
	ACPI_BUFFER buf;
	ACPI_OBJECT *top;
	ACPI_OBJECT *pkg;
	uint32_t count;
	int i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (reprobe)
		cpuhelper_assert(sc->cst_cpuid, true);

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	/* _CST is a package with a count and at least one Cx package. */
	top = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
		device_printf(sc->cst_dev, "invalid _CST package\n");
		AcpiOsFree(buf.Pointer);
		return (ENXIO);
	}
	if (count != top->Package.Count - 1) {
		device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
		    count, top->Package.Count - 1);
		count = top->Package.Count - 1;
	}
	if (count > MAX_CX_STATES) {
		device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
		count = MAX_CX_STATES;
	}

	sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT;
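	/*
	 * ACPI_CST_FLAG_PROBING makes acpi_cst_idle() fall back to C1 while
	 * this table is being rebuilt; ACPI_CST_FLAG_MATCH_HT may be cleared
	 * below if the package layout rules hyperthread matching out.
	 */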
	/*
	 * Free all previously allocated resources
	 *
	 * NOTE: This is needed for _CST reprobing.
	 */
	acpi_cst_free_resource(sc, 0);

	/* Set up all valid states. */
	sc->cst_cx_count = 0;
	cx_ptr = sc->cst_cx_states;
	for (i = 0; i < count; i++) {
		int error;

		pkg = &top->Package.Elements[i + 1];
		if (!ACPI_PKG_VALID(pkg, 4) ||
		    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
		    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
		    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
			device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
			continue;
		}

		/* Validate the state to see if we should use it. */
		switch (cx_ptr->type) {
		case ACPI_STATE_C1:
			cx_ptr->enter = acpi_cst_c1_halt_enter;
			error = acpi_cst_cx_setup(cx_ptr);
			if (error)
				panic("C1 CST HALT setup failed: %d", error);
			if (sc->cst_cx_count != 0) {
				/*
				 * C1 is not the first C-state; something
				 * really stupid is going on ...
				 */
				sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;
			}
			cx_ptr++;
			sc->cst_cx_count++;
			continue;
		case ACPI_STATE_C2:
			sc->cst_non_c3 = i;
			break;
		case ACPI_STATE_C3:
		default:
			if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				    "cpu_cst%d: C3[%d] not available.\n",
				    device_get_unit(sc->cst_dev), i));
				continue;
			}
			break;
		}

		/*
		 * Allocate the control register for C2 or C3(+).
		 */
		KASSERT(cx_ptr->res == NULL, ("still has res"));
		acpi_PkgRawGas(pkg, 0, &cx_ptr->gas);

		/*
		 * We match number of C2/C3 for hyperthreads, only if the
		 * register is "Fixed Hardware", e.g. on most of the Intel
		 * CPUs.  We don't have much to do for the rest of the
		 * registers.
		 */
		if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
			sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;

		cx_ptr->rid = sc->cst_parent->cpu_next_rid;
		acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
		if (cx_ptr->res != NULL) {
			sc->cst_parent->cpu_next_rid++;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			    "cpu_cst%d: Got C%d - %d latency\n",
			    device_get_unit(sc->cst_dev), cx_ptr->type,
			    cx_ptr->trans_lat));
			cx_ptr->enter = acpi_cst_cx_io_enter;
			cx_ptr->btag = rman_get_bustag(cx_ptr->res);
			cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
			error = acpi_cst_cx_setup(cx_ptr);
			if (error)
				panic("C%d CST I/O setup failed: %d",
				    cx_ptr->type, error);
			cx_ptr++;
			sc->cst_cx_count++;
		} else {
			error = acpi_cst_cx_setup(cx_ptr);
			if (!error) {
				KASSERT(cx_ptr->enter != NULL,
				    ("C%d enter is not set", cx_ptr->type));
				cx_ptr++;
				sc->cst_cx_count++;
			}
		}
	}
	AcpiOsFree(buf.Pointer);
	if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) {
		cpumask_t mask;

		mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL);
		if (CPUMASK_TESTNZERO(mask)) {
			int cpu;

			for (cpu = 0; cpu < ncpus; ++cpu) {
				struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu];

				if (sc1 == NULL || sc1 == sc ||
				    (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 ||
				    (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0)
					continue;
				if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid))
					continue;

				if (sc1->cst_cx_count != sc->cst_cx_count) {
					struct acpi_cst_softc *src_sc, *dst_sc;

					device_printf(sc->cst_dev,
					    "inconsistent C-state count: %d, %s has %d\n",
					    sc->cst_cx_count,
					    device_get_nameunit(sc1->cst_dev),
					    sc1->cst_cx_count);
					/*
					 * Gap in excerpt: copy direction
					 * reconstructed; assume the deeper
					 * Cx table wins.
					 */
					if (sc1->cst_cx_count > sc->cst_cx_count) {
						src_sc = sc1;
						dst_sc = sc;
					} else {
						src_sc = sc;
						dst_sc = sc1;
					}
					acpi_cst_copy(dst_sc, src_sc);
				}
			}
		}
	}
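	/*
	 * NOTE: hyperthread matching is only kept when all Cx control
	 * registers are Fixed Hardware; siblings in a core are then forced
	 * to a consistent C-state count via acpi_cst_copy() above.
	 */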
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
		for (i = 0; i < sc->cst_cx_count; ++i) {
			struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

			if (cx->type >= ACPI_STATE_C3) {
				AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
				break;
			}
		}
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);

	/*
	 * Cache the lowest non-C3 state.
	 * NOTE: must be done after cst_cx_lowest is set.
	 */
	acpi_cst_non_c3(sc);

	cpu_sfence();
	sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

	return (0);
}
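/*
 * _CST reprobing must run on the CPU that owns the softc (see the
 * cpuhelper_assert() in acpi_cst_cx_probe_cst()), so it is dispatched
 * as a cpuhelper message to the target CPU and we wait for the reply.
 */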
static void
acpi_cst_cx_reprobe_cst_handler(struct cpuhelper_msg *msg)
{
	int error;

	error = acpi_cst_cx_probe_cst(msg->ch_cbarg, 1);
	cpuhelper_replymsg(msg, error);
}

static int
acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc)
{
	struct cpuhelper_msg msg;

	cpuhelper_initmsg(&msg, &curthread->td_msgport,
	    acpi_cst_cx_reprobe_cst_handler, sc, MSGF_PRIORITY);
	return (cpuhelper_domsg(&msg, sc->cst_cpuid));
}
/*
 * Call this *after* all CPUs Cx states have been attached.
 */
static void
acpi_cst_postattach(void *arg)
{
	struct acpi_cst_softc *sc;
	int i;

	/* Get set of Cx state devices */
	devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices,
	    &acpi_cst_ndevices);

	/*
	 * Set up any quirks that might be necessary now that we have probed
	 * all the CPUs' Cx states.
	 */
	acpi_cst_set_quirks();

	if (acpi_cst_use_fadt) {
		/*
		 * We are using Cx mode from FADT, probe for available Cx states
		 * for all processors.
		 */
		for (i = 0; i < acpi_cst_ndevices; i++) {
			sc = device_get_softc(acpi_cst_devices[i]);
			acpi_cst_cx_probe_fadt(sc);
		}
	} else {
		/*
		 * We are using _CST mode, remove C3 state if necessary.
		 *
		 * As we now know for sure that we will be using _CST mode,
		 * install our notify handler.
		 */
		for (i = 0; i < acpi_cst_ndevices; i++) {
			sc = device_get_softc(acpi_cst_devices[i]);
			if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
				/* Free part of unused resources */
				acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
				sc->cst_cx_count = sc->cst_non_c3 + 1;
			}
			sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
		}
	}
	acpi_cst_global_cx_count();

	/* Perform Cx final initialization. */
	for (i = 0; i < acpi_cst_ndevices; i++) {
		sc = device_get_softc(acpi_cst_devices[i]);
		acpi_cst_startup(sc);

		if (sc->cst_parent->glob_sysctl_tree != NULL) {
			struct acpi_cpu_softc *cpu = sc->cst_parent;

			/* Add a sysctl handler to handle global Cx lowest setting */
			SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
			SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
		}
	}

	/* Take over idling from cpu_idle_default(). */
	acpi_cst_cx_lowest = 0;
	acpi_cst_cx_lowest_req = 0;
	acpi_cst_disable_idle = FALSE;
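	/*
	 * Make everything initialized above globally visible before the
	 * idle hook is published to the other CPUs.
	 */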
	cpu_sfence();
	cpu_idle_hook = acpi_cst_idle;
}
static void
acpi_cst_support_list(struct acpi_cst_softc *sc)
{
	struct sbuf sb;
	int i;

	/*
	 * Set up the list of Cx states
	 */
	sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported),
	    SBUF_FIXEDLEN);
	for (i = 0; i < sc->cst_cx_count; i++)
		sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
}
static void
acpi_cst_c3_bm_rld_handler(struct cpuhelper_msg *msg)
{
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	cpuhelper_replymsg(msg, 0);
}

static void
acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc)
{
	struct cpuhelper_msg msg;

	cpuhelper_initmsg(&msg, &curthread->td_msgport,
	    acpi_cst_c3_bm_rld_handler, sc, MSGF_PRIORITY);
	cpuhelper_domsg(&msg, sc->cst_cpuid);
}
static void
acpi_cst_startup(struct acpi_cst_softc *sc)
{
	struct acpi_cpu_softc *cpu = sc->cst_parent;
	int i, bm_rld_done = 0;

	for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];
		int error;

		/* If there are C3(+) states, always enable bus master wakeup */
		if (cx->type >= ACPI_STATE_C3 && !bm_rld_done &&
		    (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
			acpi_cst_c3_bm_rld(sc);
			bm_rld_done = 1;
		}

		/* Redo the Cx setup, since quirks have been changed */
		error = acpi_cst_cx_setup(cx);
		if (error)
			panic("C%d startup setup failed: %d", i + 1, error);
	}

	acpi_cst_support_list(sc);

	SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
	    OID_AUTO, "cx_supported", CTLFLAG_RD,
	    sc->cst_cx_supported, 0,
	    "Cx/microsecond values for supported Cx states");
	SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
	    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
	    (void *)sc, 0, acpi_cst_lowest_sysctl, "A",
	    "requested lowest Cx sleep state");
	SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
	    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
	    (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A",
	    "lowest Cx sleep state to use");
	SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
	    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
	    (void *)sc, 0, acpi_cst_usage_sysctl, "A",
	    "percent usage for each Cx state");

	/* Signal platform that we can handle _CST notification. */
	if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) {
		ACPI_LOCK(acpi);
		AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8);
		ACPI_UNLOCK(acpi);
	}
}
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
	struct acpi_cst_softc *sc;
	struct acpi_cst_cx *cx_next;
	union microtime_pcpu start, end;
	int cx_next_idx, i, tdiff, bm_arb_disabled = 0;
	/* If disabled, return immediately. */
	if (acpi_cst_disable_idle) {
		ACPI_ENABLE_IRQS();
		return;
	}

	/*
	 * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
	 * since there is no Cx state for this processor.
	 */
	sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
	if (sc == NULL) {
		acpi_cst_c1_halt();
		return;
	}

	/* Still probing; use C1 */
	if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
		acpi_cst_c1_halt();
		return;
	}
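	/*
	 * NOTE: the search below only accepts a state whose transition
	 * latency is at most one third of the previously measured sleep
	 * time, so short sleeps stick to shallow states.
	 */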
	/* Find the lowest state that has small enough latency. */
	cx_next_idx = 0;
	for (i = sc->cst_cx_lowest; i >= 0; i--) {
		if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
			cx_next_idx = i;
			break;
		}
	}
	/*
	 * Check for bus master activity if needed for the selected state.
	 * If there was activity, clear the bit and use the lowest non-C3 state.
	 */
	cx_next = &sc->cst_cx_states[cx_next_idx];
	if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
		int bm_active;

		AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
		if (bm_active != 0) {
			AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
			cx_next_idx = sc->cst_non_c3;
		}
	}
	/* Select the next state and update statistics. */
	cx_next = &sc->cst_cx_states[cx_next_idx];
	sc->cst_cx_stats[cx_next_idx]++;
	KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));
	/*
	 * Execute HLT (or equivalent) and wait for an interrupt.  We can't
	 * calculate the time spent in C1 since the place we wake up is an
	 * ISR.  Assume we slept half of quantum and return.
	 */
	if (cx_next->type == ACPI_STATE_C1) {
		sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
		cx_next->enter(cx_next);
		return;
	}
	/* Execute the proper preamble before entering the selected state. */
	if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
		AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
		bm_arb_disabled = 1;
	} else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
		ACPI_FLUSH_CPU_CACHE();
	}
	/*
	 * Enter the selected state and check time spent asleep.
	 */
	microtime_pcpu_get(&start);

	cx_next->enter(cx_next);

	microtime_pcpu_get(&end);

	/* Enable bus master arbitration, if it was disabled. */
	if (bm_arb_disabled)
		AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	ACPI_ENABLE_IRQS();

	/* Find the actual time asleep in microseconds. */
	tdiff = microtime_pcpu_diff(&start, &end);
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *sc = device_get_softc(dev);

	cpuhelper_assert(mycpuid, false);

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Update the list of Cx states. */
	acpi_cst_cx_reprobe_cst(sc);
	acpi_cst_support_list(sc);

	/* Update the new lowest useable Cx state for all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Fix up the lowest Cx being used
	 */
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	lwkt_serialize_exit(&acpi_cst_slize);
}
static int
acpi_cst_set_quirks(void)
{
	device_t acpi_dev;
	uint32_t val;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/*
	 * Bus mastering arbitration control is needed to keep caches coherent
	 * while sleeping in C3.  If it's not present but a working flush cache
	 * instruction is present, flush the caches before entering C3 instead.
	 * Otherwise, just disable C3 completely.
	 */
	if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	    AcpiGbl_FADT.Pm2ControlLength == 0) {
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
		    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
			acpi_cst_quirks |= ACPI_CST_QUIRK_NO_BM;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			    "cpu_cst: no BM control, using flush cache method\n"));
		} else {
			acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			    "cpu_cst: no BM control, C3 not available\n"));
		}
	}
	/* Look for various quirks of the PIIX4 part. */
	acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
	if (acpi_dev != NULL) {
		switch (pci_get_revid(acpi_dev)) {
		/*
		 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
		 * do not report the BMIDE status to the BM status register and
		 * others have a livelock bug if Type-F DMA is enabled.  Linux
		 * works around the BMIDE bug by reading the BM status directly
		 * but we take the simpler approach of disabling C3 for these
		 * parts.
		 *
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 *
		 * Also, make sure that all interrupts cause a "Stop Break"
		 * event to exit from C2 state.
		 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
		 * should be set to zero, otherwise it causes C2 to short-sleep.
		 * PIIX4 doesn't properly support C3 and bus master activity
		 * need not break out of C2.
		 */
		case PCI_REVISION_A_STEP:
		case PCI_REVISION_B_STEP:
		case PCI_REVISION_4E:
		case PCI_REVISION_4M:
			acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			    "cpu_cst: working around PIIX4 bug, disabling C3\n"));

			val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
			if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				    "cpu_cst: PIIX4: enabling IRQs to generate Stop Break\n"));
				val |= PIIX4_STOP_BREAK_MASK;
				pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
			}
			AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
			if (val) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				    "cpu_cst: PIIX4: reset BRLD_EN_BM\n"));
				AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}
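/*
 * Report per-state usage as percentages with two fractional digits,
 * using integer arithmetic only: whole = count * 100, and the fraction
 * is ((whole % sum) * 100) / sum.
 */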
static int
acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cst_softc *sc;
	struct sbuf sb;
	char buf[128];
	int i;
	uintmax_t fract, sum, whole;

	sc = (struct acpi_cst_softc *) arg1;
	sum = 0;
	for (i = 0; i < sc->cst_cx_count; i++)
		sum += sc->cst_cx_stats[i];
	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < sc->cst_cx_count; i++) {
		if (sum > 0) {
			whole = (uintmax_t)sc->cst_cx_stats[i] * 100;
			fract = (whole % sum) * 100;
			sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
			    (u_int)(fract / sum));
		} else {
			sbuf_printf(&sb, "0.00%% ");
		}
	}
	sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);

	return (0);
}
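/*
 * Set the per-CPU lowest Cx state; must run on the target CPU itself.
 * Crossing the C3 boundary also adds or removes a powersave requirement
 * on the interrupt cputimer, since the local APIC timer may stop in C3
 * and deeper states.
 */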
static int
acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val)
{
	int old_lowest, error = 0, old_lowest_req;
	uint32_t old_type, type;

	KKASSERT(mycpuid == sc->cst_cpuid);

	old_lowest_req = sc->cst_cx_lowest_req;
	sc->cst_cx_lowest_req = val;

	if (val > sc->cst_cx_count - 1)
		val = sc->cst_cx_count - 1;
	old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val);

	old_type = sc->cst_cx_states[old_lowest].type;
	type = sc->cst_cx_states[val].type;
	if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
		cputimer_intr_powersave_remreq();
	} else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
		error = cputimer_intr_powersave_addreq();
		if (error) {
			/* Restore the requested and current lowest Cx. */
			sc->cst_cx_lowest_req = old_lowest_req;
			sc->cst_cx_lowest = old_lowest;
		}
	}

	if (error)
		return error;

	/* Cache the new lowest non-C3 state. */
	acpi_cst_non_c3(sc);

	/* Reset the statistics counters. */
	bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats));
	return 0;
}
static void
acpi_cst_set_lowest_handler(struct cpuhelper_msg *msg)
{
	int error;

	error = acpi_cst_set_lowest_oncpu(msg->ch_cbarg, msg->ch_cbarg1);
	cpuhelper_replymsg(msg, error);
}

static int
acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val)
{
	struct cpuhelper_msg msg;

	cpuhelper_initmsg(&msg, &curthread->td_msgport,
	    acpi_cst_set_lowest_handler, sc, MSGF_PRIORITY);
	msg.ch_cbarg1 = val;

	return (cpuhelper_domsg(&msg, sc->cst_cpuid));
}
static int
acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cst_softc *sc;
	char state[8];
	int val, error;

	sc = (struct acpi_cst_softc *)arg1;
	ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1);
	error = sysctl_handle_string(oidp, state, sizeof(state), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (strlen(state) < 2 || toupper(state[0]) != 'C')
		return (EINVAL);
	val = (int) strtol(state + 1, NULL, 10) - 1;
	if (val < 0)
		return (EINVAL);

	lwkt_serialize_enter(&acpi_cst_slize);
	error = acpi_cst_set_lowest(sc, val);
	lwkt_serialize_exit(&acpi_cst_slize);

	return error;
}
static int
acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cst_softc *sc;
	char state[8];

	sc = (struct acpi_cst_softc *)arg1;
	ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1);
	return sysctl_handle_string(oidp, state, sizeof(state), req);
}
static int
acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cst_softc *sc;
	char state[8];
	int val, error, i;

	ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1);
	error = sysctl_handle_string(oidp, state, sizeof(state), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (strlen(state) < 2 || toupper(state[0]) != 'C')
		return (EINVAL);
	val = (int) strtol(state + 1, NULL, 10) - 1;
	if (val < 0)
		return (EINVAL);

	lwkt_serialize_enter(&acpi_cst_slize);

	acpi_cst_cx_lowest_req = val;
	acpi_cst_cx_lowest = val;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	/* Update the new lowest useable Cx state for all CPUs. */
	for (i = 0; i < acpi_cst_ndevices; i++) {
		sc = device_get_softc(acpi_cst_devices[i]);
		error = acpi_cst_set_lowest(sc, val);
		if (error)
			break;
	}

	lwkt_serialize_exit(&acpi_cst_slize);

	return error;
}
static int
acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
	char state[8];

	ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest + 1);
	return sysctl_handle_string(oidp, state, sizeof(state), req);
}
/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cst_c1_halt(void)
{
	/* Gap in excerpt: body reconstructed; assumes x86 "sti; hlt". */
	__asm __volatile("sti; hlt");
}
static void
acpi_cst_non_c3(struct acpi_cst_softc *sc)
{
	int i;

	sc->cst_non_c3 = 0;
	for (i = sc->cst_cx_lowest; i >= 0; i--) {
		if (sc->cst_cx_states[i].type < ACPI_STATE_C3) {
			sc->cst_non_c3 = i;
			break;
		}
	}
	if (bootverbose)
		device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3);
}
/*
 * Update the largest Cx state supported in the global acpi_cst_cx_count.
 * It will be used in the global Cx sysctl handler.
 */
static void
acpi_cst_global_cx_count(void)
{
	struct acpi_cst_softc *sc;
	int i;

	if (acpi_cst_ndevices == 0) {
		acpi_cst_cx_count = 0;
		return;
	}

	sc = device_get_softc(acpi_cst_devices[0]);
	acpi_cst_cx_count = sc->cst_cx_count;

	for (i = 1; i < acpi_cst_ndevices; i++) {
		struct acpi_cst_softc *sc = device_get_softc(acpi_cst_devices[i]);

		if (sc->cst_cx_count < acpi_cst_cx_count)
			acpi_cst_cx_count = sc->cst_cx_count;
	}

	if (bootverbose)
		kprintf("cpu_cst: global Cx count %d\n", acpi_cst_cx_count);
}
static void
acpi_cst_c1_halt_enter(const struct acpi_cst_cx *cx __unused)
{
	acpi_cst_c1_halt();
}

static void
acpi_cst_cx_io_enter(const struct acpi_cst_cx *cx)
{
	uint64_t dummy;

	/*
	 * Read I/O to enter this Cx state
	 */
	bus_space_read_1(cx->btag, cx->bhand, 0);
	/*
	 * Perform a dummy I/O read.  Since it may take an arbitrary time
	 * to enter the idle state, this read makes sure that we are frozen.
	 */
	AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock);
}
static int
acpi_cst_cx_setup(struct acpi_cst_cx *cx)
{
	cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS;
	cx->preamble = ACPI_CST_CX_PREAMBLE_NONE;

	if (cx->type >= ACPI_STATE_C3) {
		/*
		 * Set the required operations for entering C3(+) state.
		 * Later acpi_cst_md_cx_setup() may fix them up.
		 */

		/*
		 * Always check BM_STS.
		 */
		if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
			cx->flags |= ACPI_CST_CX_FLAG_BM_STS;

		/*
		 * According to the ACPI specification, bus master arbitration
		 * is only available on UP systems.  For MP systems, cache
		 * flushing is required.
		 */
		if (ncpus == 1 && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
			cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB;
		else
			cx->preamble = ACPI_CST_CX_PREAMBLE_WBINVD;
	}
	return acpi_cst_md_cx_setup(cx);
}
static void
acpi_cst_free_resource(struct acpi_cst_softc *sc, int start)
{
	int i;

	for (i = start; i < MAX_CX_STATES; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->res != NULL)
			bus_release_resource(sc->cst_dev, cx->res_type, cx->rid, cx->res);
		memset(cx, 0, sizeof(*cx));
	}
}