soc/intel: Rename to soc_fill_gnvs()
coreboot.git: src/soc/intel/skylake/acpi.c

/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <acpi/acpi_gnvs.h>
#include <acpi/acpigen.h>
#include <arch/cpu.h>
#include <arch/ioapic.h>
#include <arch/smp/mpspec.h>
#include <cbmem.h>
#include <console/console.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/msr.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/turbo.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/lpc_lib.h>
#include <intelblocks/sgx.h>
#include <intelblocks/uart.h>
#include <intelblocks/systemagent.h>
#include <soc/intel/common/acpi.h>
#include <soc/acpi.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/msr.h>
#include <soc/nvs.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/ramstage.h>
#include <soc/systemagent.h>
#include <string.h>
#include <types.h>
#include <wrdd.h>
#include <device/pci_ops.h>

#include "chip.h"

#define CPUID_6_EAX_ISST	(1 << 7)

/*
 * List of supported C-states in this processor.
 */
enum {
	C_STATE_C0,		/* 0 */
	C_STATE_C1,		/* 1 */
	C_STATE_C1E,		/* 2 */
	C_STATE_C3,		/* 3 */
	C_STATE_C6_SHORT_LAT,	/* 4 */
	C_STATE_C6_LONG_LAT,	/* 5 */
	C_STATE_C7_SHORT_LAT,	/* 6 */
	C_STATE_C7_LONG_LAT,	/* 7 */
	C_STATE_C7S_SHORT_LAT,	/* 8 */
	C_STATE_C7S_LONG_LAT,	/* 9 */
	C_STATE_C8,		/* 10 */
	C_STATE_C9,		/* 11 */
	C_STATE_C10,		/* 12 */
	NUM_C_STATES
};

#define MWAIT_RES(state, sub_state)				\
	{							\
		.addrl = (((state) << 4) | (sub_state)),	\
		.space_id = ACPI_ADDRESS_SPACE_FIXED,		\
		.bit_width = ACPI_FFIXEDHW_VENDOR_INTEL,	\
		.bit_offset = ACPI_FFIXEDHW_CLASS_MWAIT,	\
		.access_size = ACPI_FFIXEDHW_FLAG_HW_COORD,	\
	}
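
/*
 * Each cstate_map entry below is exposed to the OS as a FFixedHW _CST
 * resource. The .addrl field carries the MWAIT hint: the C-state index in
 * bits [7:4] and the sub-state in bits [3:0], as encoded by MWAIT_RES().
 */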

static acpi_cstate_t cstate_map[NUM_C_STATES] = {
	[C_STATE_C0] = { },
	[C_STATE_C1] = {
		.latency = 0,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 0),
	},
	[C_STATE_C1E] = {
		.latency = 0,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 1),
	},
	[C_STATE_C3] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(0),
		.power = C3_POWER,
		.resource = MWAIT_RES(1, 0),
	},
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 0),
	},
	[C_STATE_C6_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 1),
	},
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 0),
	},
	[C_STATE_C7_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 1),
	},
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 2),
	},
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 3),
	},
	[C_STATE_C8] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(3),
		.power = C8_POWER,
		.resource = MWAIT_RES(4, 0),
	},
	[C_STATE_C9] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(4),
		.power = C9_POWER,
		.resource = MWAIT_RES(5, 0),
	},
	[C_STATE_C10] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(5),
		.power = C10_POWER,
		.resource = MWAIT_RES(6, 0),
	},
};

static int cstate_set_s0ix[] = {
	C_STATE_C1E,
	C_STATE_C7S_LONG_LAT,
	C_STATE_C10
};

static int cstate_set_non_s0ix[] = {
	C_STATE_C1E,
	C_STATE_C3,
	C_STATE_C7S_LONG_LAT
};

static int get_cores_per_package(void)
{
	struct cpuinfo_x86 c;
	struct cpuid_result result;
	int cores = 1;

	get_fms(&c, cpuid_eax(1));
	if (c.x86 != 6)
		return 1;
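
	/*
	 * CPUID leaf 0xB, sub-leaf 1 (core level): EBX[15:0] reports the
	 * number of logical processors in the package; only the low byte is
	 * used here.
	 */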
	result = cpuid_ext(0xb, 1);
	cores = result.ebx & 0xff;

	return cores;
}

void soc_fill_gnvs(struct global_nvs *gnvs)
{
	const struct soc_intel_skylake_config *config = config_of_soc();

	/* Set unknown wake source */
	gnvs->pm1i = -1;

	/* CPU core count */
	gnvs->pcnt = dev_count_cpu();

	/* Enable DPTF based on mainboard configuration */
	gnvs->dpte = config->dptf_enable;

	/* Fill in the Wifi Region id */
	gnvs->cid1 = wifi_regulatory_domain();

	/* Set USB2/USB3 wake enable bitmaps. */
	gnvs->u2we = config->usb2_wake_enable_bitmap;
	gnvs->u3we = config->usb3_wake_enable_bitmap;

	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		sgx_fill_gnvs(gnvs);

	/* Fill in Above 4GB MMIO resource */
	sa_fill_gnvs(gnvs);
}

unsigned long acpi_fill_mcfg(unsigned long current)
{
	current += acpi_create_mcfg_mmconfig((acpi_mcfg_mmconfig_t *)current,
					     CONFIG_MMCONF_BASE_ADDRESS, 0, 0,
					     (CONFIG_SA_PCIEX_LENGTH >> 20) - 1);
	return current;
}

unsigned long acpi_fill_madt(unsigned long current)
{
	/* Local APICs */
	current = acpi_create_madt_lapics(current);

	/* IOAPIC */
	current += acpi_create_madt_ioapic((acpi_madt_ioapic_t *) current,
					   2, IO_APIC_ADDR, 0);

	return acpi_madt_irq_overrides(current);
}

static void write_c_state_entries(acpi_cstate_t *map, const int *set, size_t max_c_state)
{
	for (size_t i = 0; i < max_c_state; i++) {
		memcpy(&map[i], &cstate_map[set[i]], sizeof(acpi_cstate_t));
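		/* _CST C-state type fields are 1-based (first entry is C1) */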
		map[i].ctype = i + 1;
	}

	/* Generate C-state tables */
	acpigen_write_CST_package(map, max_c_state);
}

static void generate_c_state_entries(int s0ix_enable)
{
	if (s0ix_enable) {
		acpi_cstate_t map[ARRAY_SIZE(cstate_set_s0ix)];
		write_c_state_entries(map, cstate_set_s0ix, ARRAY_SIZE(map));
	} else {
		acpi_cstate_t map[ARRAY_SIZE(cstate_set_non_s0ix)];
		write_c_state_entries(map, cstate_set_non_s0ix, ARRAY_SIZE(map));
	}
}

static int calculate_power(int tdp, int p1_ratio, int ratio)
{
	u32 m;
	u32 power;

	/*
	 * M = ((1.1 - ((p1_ratio - ratio) * 0.00625)) / 1.1) ^ 2
	 *
	 * Power = (ratio / p1_ratio) * m * tdp
	 */
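
	/*
	 * Illustrative example (assumed values): tdp = 15000 mW,
	 * p1_ratio = 24, ratio = 16:
	 *   m     = (110000 - 8 * 625) / 11 = 9545; m = 9545^2 / 1000 = 91107
	 *   power = (16 * 100000 / 24) / 100 = 666
	 *   power = 666 * (91107 / 100) * (15000 / 1000) / 1000 = 9100 mW
	 */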

	m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
	m = (m * m) / 1000;

	power = ((ratio * 100000 / p1_ratio) / 100);
	power *= (m / 100) * (tdp / 1000);
	power /= 1000;

	return (int)power;
}

static void generate_p_state_entries(int core, int cores_per_package)
{
	int ratio_min, ratio_max, ratio_turbo, ratio_step;
	int coord_type, power_max, power_unit, num_entries;
	int ratio, power, clock, clock_max;
	msr_t msr;

	/* Determine P-state coordination type from MISC_PWR_MGMT[0] */
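	/*
	 * With hardware EIST coordination disabled, the OS has to coordinate
	 * P-state requests itself (SW_ANY); otherwise hardware coordinates
	 * them across the package (HW_ALL).
	 */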
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (msr.lo & MISC_PWR_MGMT_EIST_HW_DIS)
		coord_type = SW_ANY;
	else
		coord_type = HW_ALL;

	/* Get bus ratio limits and calculate clock speeds */
	msr = rdmsr(MSR_PLATFORM_INFO);
	ratio_min = (msr.hi >> (40-32)) & 0xff; /* Max Efficiency Ratio */

	/* Determine if this CPU has configurable TDP */
	if (cpu_config_tdp_levels()) {
		/* Set max ratio to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		ratio_max = msr.lo & 0xff;
	} else {
		/* Max Non-Turbo Ratio */
		ratio_max = (msr.lo >> 8) & 0xff;
	}
	clock_max = ratio_max * CONFIG_CPU_BCLK_MHZ;

	/* Calculate CPU TDP in mW */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);
	msr = rdmsr(MSR_PKG_POWER_SKU);
	power_max = ((msr.lo & 0x7fff) / power_unit) * 1000;

	/* Write _PCT indicating use of FFixedHW */
	acpigen_write_empty_PCT();

	/* Write _PPC with no limit on supported P-state */
	acpigen_write_PPC_NVS();

	/* Write PSD indicating configured coordination type */
	acpigen_write_PSD_package(core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	acpigen_write_name("_PSS");

	/* Determine ratio points */
	ratio_step = PSS_RATIO_STEP;
	num_entries = ((ratio_max - ratio_min) / ratio_step) + 1;
	if (num_entries > PSS_MAX_ENTRIES) {
		ratio_step += 1;
		num_entries = ((ratio_max - ratio_min) / ratio_step) + 1;
	}

	/* P[T] is Turbo state if enabled */
	if (get_turbo_state() == TURBO_ENABLED) {
		/* _PSS package count including Turbo */
		acpigen_write_package(num_entries + 2);

		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		ratio_turbo = msr.lo & 0xff;

		/* Add entry for Turbo ratio */
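		/*
		 * Reported at clock_max + 1 MHz so the turbo entry stays
		 * distinct from the max non-turbo P-state below.
		 */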
		acpigen_write_PSS_package(
			clock_max + 1,		/* MHz */
			power_max,		/* mW */
			PSS_LATENCY_TRANSITION,	/* lat1 */
			PSS_LATENCY_BUSMASTER,	/* lat2 */
			ratio_turbo << 8,	/* control */
			ratio_turbo << 8);	/* status */
	} else {
		/* _PSS package count without Turbo */
		acpigen_write_package(num_entries + 1);
	}

	/* First regular entry is max non-turbo ratio */
	acpigen_write_PSS_package(
		clock_max,		/* MHz */
		power_max,		/* mW */
		PSS_LATENCY_TRANSITION,	/* lat1 */
		PSS_LATENCY_BUSMASTER,	/* lat2 */
		ratio_max << 8,		/* control */
		ratio_max << 8);	/* status */

	/* Generate the remaining entries */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {

		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = ratio * CONFIG_CPU_BCLK_MHZ;

		acpigen_write_PSS_package(
			clock,			/* MHz */
			power,			/* mW */
			PSS_LATENCY_TRANSITION,	/* lat1 */
			PSS_LATENCY_BUSMASTER,	/* lat2 */
			ratio << 8,		/* control */
			ratio << 8);		/* status */
	}

	/* Fix package length */
	acpigen_pop_len();
}

static void generate_cppc_entries(int core_id)
{
	/* Generate GCPC table in first logical core */
	if (core_id == 0) {
		struct cppc_config cppc_config;
		cpu_init_cppc_config(&cppc_config, CPPC_VERSION_2);
		acpigen_write_CPPC_package(&cppc_config);
	}

	/* Write _CPC entry for each logical core */
	acpigen_write_CPPC_method();
}

void generate_cpu_entries(const struct device *device)
{
	int core_id, cpu_id, pcontrol_blk = ACPI_BASE_ADDRESS, plen = 6;
	int totalcores = dev_count_cpu();
	int cores_per_package = get_cores_per_package();
	int numcpus = totalcores / cores_per_package;
	config_t *config = config_of_soc();
	int is_s0ix_enable = config->s0ix_enable;
	const bool isst_supported = cpuid_eax(6) & CPUID_6_EAX_ISST;

	printk(BIOS_DEBUG, "Found %d CPU(s) with %d core(s) each.\n",
	       numcpus, cores_per_package);

	for (cpu_id = 0; cpu_id < numcpus; cpu_id++) {
		for (core_id = 0; core_id < cores_per_package; core_id++) {
			if (core_id > 0) {
				pcontrol_blk = 0;
				plen = 0;
			}

			/* Generate processor \_SB.CPUx */
			acpigen_write_processor(
				cpu_id * cores_per_package + core_id,
				pcontrol_blk, plen);

			/* Generate C-state tables */
			generate_c_state_entries(is_s0ix_enable);

			if (config->eist_enable) {
				/* Generate P-state tables */
				generate_p_state_entries(core_id,
						cores_per_package);
			}

			if (isst_supported)
				generate_cppc_entries(core_id);

			acpigen_pop_len();
		}
	}

	/* PPKG is usually used for thermal management
	   of the first and only package. */
	acpigen_write_processor_package("PPKG", 0, cores_per_package);

	/* Add a method to notify processor nodes */
	acpigen_write_processor_cnot(cores_per_package);
}

static unsigned long acpi_fill_dmar(unsigned long current)
{
	struct device *const igfx_dev = pcidev_path_on_root(SA_DEVFN_IGD);
	const u32 gfx_vtbar = MCHBAR32(GFXVTBAR) & ~0xfff;
	const bool gfxvten = MCHBAR32(GFXVTBAR) & 1;

	/* iGFX has to be enabled, GFXVTBAR set and in 32-bit space. */
	const bool emit_igd =
		igfx_dev && igfx_dev->enabled &&
		gfx_vtbar && gfxvten &&
		!MCHBAR32(GFXVTBAR + 4);

	/* First, add DRHD entries */
	if (emit_igd) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, 0, 0, gfx_vtbar);
		current += acpi_create_dmar_ds_pci(current, 0, 2, 0);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	const u32 vtvc0bar = MCHBAR32(VTVC0BAR) & ~0xfff;
	const bool vtvc0en = MCHBAR32(VTVC0BAR) & 1;

	/* General VTBAR has to be set and in 32-bit space. */
	if (vtvc0bar && vtvc0en && !MCHBAR32(VTVC0BAR + 4)) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, DRHD_INCLUDE_PCI_ALL, 0, vtvc0bar);

		current += acpi_create_dmar_ds_ioapic(current, 2, V_P2SB_IBDF_BUS,
						      V_P2SB_IBDF_DEV, V_P2SB_IBDF_FUN);

		current += acpi_create_dmar_ds_msi_hpet(current, 0, V_P2SB_HBDF_BUS,
							V_P2SB_HBDF_DEV, V_P2SB_HBDF_FUN);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	/* Then, add RMRR entries after all DRHD entries */
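	/*
	 * The RMRR below covers graphics stolen memory (GSM base up to
	 * TOLUD - 1) so the IGD can keep accessing that range once the OS
	 * enables DMA remapping.
	 */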
	if (emit_igd) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_rmrr(current, 0,
				sa_get_gsm_base(), sa_get_tolud_base() - 1);
		current += acpi_create_dmar_ds_pci(current, 0, 2, 0);
		acpi_dmar_rmrr_fixup(tmp, current);
	}

	return current;
}

unsigned long northbridge_write_acpi_tables(const struct device *const dev,
					    unsigned long current,
					    struct acpi_rsdp *const rsdp)
{
	const struct soc_intel_skylake_config *const config = config_of(dev);
	acpi_dmar_t *const dmar = (acpi_dmar_t *)current;

	/* Create DMAR table only if we have VT-d capability. */
	if (config->ignore_vtd || !soc_is_vtd_capable())
		return current;

	printk(BIOS_DEBUG, "ACPI: * DMAR\n");
	acpi_create_dmar(dmar, DMAR_INTR_REMAP, acpi_fill_dmar);
	current += dmar->header.length;
	current = acpi_align_current(current);
	acpi_add_table(rsdp, dmar);

	return current;
}

unsigned long acpi_madt_irq_overrides(unsigned long current)
{
	int sci = acpi_sci_irq();
	acpi_madt_irqoverride_t *irqovr;
	uint16_t flags = MP_IRQ_TRIGGER_LEVEL;

	/* INT_SRC_OVR */
	irqovr = (void *)current;
	current += acpi_create_madt_irqoverride(irqovr, 0, 0, 2, 0);

	if (sci >= 20)
		flags |= MP_IRQ_POLARITY_LOW;
	else
		flags |= MP_IRQ_POLARITY_HIGH;

	/* SCI */
	irqovr = (void *)current;
	current += acpi_create_madt_irqoverride(irqovr, 0, sci, sci, flags);

	/* NMI */
	current += acpi_create_madt_lapic_nmi((acpi_madt_lapic_nmi_t *)current, 0xff, 5, 1);

	return current;
}

unsigned long southbridge_write_acpi_tables(const struct device *device,
					    unsigned long current,
					    struct acpi_rsdp *rsdp)
{
	current = acpi_write_dbg2_pci_uart(rsdp, current,
					   uart_get_device(),
					   ACPI_ACCESS_SIZE_DWORD_ACCESS);
	current = acpi_write_hpet(device, current, rsdp);
	return acpi_align_current(current);
}

void southbridge_inject_dsdt(const struct device *device)
{
	struct global_nvs *gnvs = acpi_get_gnvs();
	if (!gnvs)
		return;

	soc_fill_gnvs(gnvs);
	acpi_inject_nvsa();
}

/* Save wake source information for calculating ACPI _SWS values */
int soc_fill_acpi_wake(uint32_t *pm1, uint32_t **gpe0)
{
	const struct soc_intel_skylake_config *config = config_of_soc();
	struct chipset_power_state *ps;
	static uint32_t gpe0_sts[GPE0_REG_MAX];
	uint32_t pm1_en;
	uint32_t gpe0_std;
	int i;
	const int last_index = GPE0_REG_MAX - 1;

	ps = cbmem_find(CBMEM_ID_POWER_STATE);
	if (ps == NULL)
		return -1;

	pm1_en = ps->pm1_en;
	gpe0_std = ps->gpe0_en[3];

	/*
	 * Chipset state in the suspend well (but not RTC) is lost in Deep S3
	 * so enable Deep S3 wake events that are configured by the mainboard
	 */
	if (ps->prev_sleep_state == ACPI_S3 &&
	    (config->deep_s3_enable_ac || config->deep_s3_enable_dc)) {
		pm1_en |= PWRBTN_STS;	/* Always enabled as wake source */
		if (config->deep_sx_config & DSX_EN_LAN_WAKE_PIN)
			gpe0_std |= LAN_WAK_EN;
		if (config->deep_sx_config & DSX_EN_WAKE_PIN)
			pm1_en |= PCIEXPWAK_STS;
	}

	*pm1 = ps->pm1_sts & pm1_en;

	/* Mask off GPE0 status bits that are not enabled */
	*gpe0 = &gpe0_sts[0];
	for (i = 0; i < last_index; i++)
		gpe0_sts[i] = ps->gpe0_sts[i] & ps->gpe0_en[i];
	gpe0_sts[last_index] = ps->gpe0_sts[last_index] & gpe0_std;

	return GPE0_REG_MAX;
}

const char *soc_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PCI0";

	if (dev->path.type == DEVICE_PATH_USB) {
		switch (dev->path.usb.port_type) {
		case 0:
			/* Root Hub */
			return "RHUB";
		case 2:
			/* USB2 ports */
			switch (dev->path.usb.port_id) {
			case 0: return "HS01";
			case 1: return "HS02";
			case 2: return "HS03";
			case 3: return "HS04";
			case 4: return "HS05";
			case 5: return "HS06";
			case 6: return "HS07";
			case 7: return "HS08";
			case 8: return "HS09";
			case 9: return "HS10";
			}
			break;
		case 3:
			/* USB3 ports */
			switch (dev->path.usb.port_id) {
			case 0: return "SS01";
			case 1: return "SS02";
			case 2: return "SS03";
			case 3: return "SS04";
			case 4: return "SS05";
			case 5: return "SS06";
			}
			break;
		}
		return NULL;
	}

	if (dev->path.type != DEVICE_PATH_PCI)
		return NULL;

	/* Match functions 0 and 1 for possible GPUs on a secondary bus */
	if (dev->bus && dev->bus->secondary > 0) {
		switch (PCI_FUNC(dev->path.pci.devfn)) {
		case 0: return "DEV0";
		case 1: return "DEV1";
		}
		return NULL;
	}

	switch (dev->path.pci.devfn) {
	case SA_DEVFN_ROOT:	return "MCHC";
	case SA_DEVFN_PEG0:	return "PEGP";
	case SA_DEVFN_IGD:	return "GFX0";
	case PCH_DEVFN_ISH:	return "ISHB";
	case PCH_DEVFN_XHCI:	return "XHCI";
	case PCH_DEVFN_USBOTG:	return "XDCI";
	case PCH_DEVFN_THERMAL:	return "THRM";
	case PCH_DEVFN_CIO:	return "ICIO";
	case PCH_DEVFN_I2C0:	return "I2C0";
	case PCH_DEVFN_I2C1:	return "I2C1";
	case PCH_DEVFN_I2C2:	return "I2C2";
	case PCH_DEVFN_I2C3:	return "I2C3";
	case PCH_DEVFN_CSE:	return "CSE1";
	case PCH_DEVFN_CSE_2:	return "CSE2";
	case PCH_DEVFN_CSE_IDER:	return "CSED";
	case PCH_DEVFN_CSE_KT:	return "CSKT";
	case PCH_DEVFN_CSE_3:	return "CSE3";
	case PCH_DEVFN_SATA:	return "SATA";
	case PCH_DEVFN_UART2:	return "UAR2";
	case PCH_DEVFN_I2C4:	return "I2C4";
	case PCH_DEVFN_I2C5:	return "I2C5";
	case PCH_DEVFN_PCIE1:	return "RP01";
	case PCH_DEVFN_PCIE2:	return "RP02";
	case PCH_DEVFN_PCIE3:	return "RP03";
	case PCH_DEVFN_PCIE4:	return "RP04";
	case PCH_DEVFN_PCIE5:	return "RP05";
	case PCH_DEVFN_PCIE6:	return "RP06";
	case PCH_DEVFN_PCIE7:	return "RP07";
	case PCH_DEVFN_PCIE8:	return "RP08";
	case PCH_DEVFN_PCIE9:	return "RP09";
	case PCH_DEVFN_PCIE10:	return "RP10";
	case PCH_DEVFN_PCIE11:	return "RP11";
	case PCH_DEVFN_PCIE12:	return "RP12";
	case PCH_DEVFN_PCIE13:	return "RP13";
	case PCH_DEVFN_PCIE14:	return "RP14";
	case PCH_DEVFN_PCIE15:	return "RP15";
	case PCH_DEVFN_PCIE16:	return "RP16";
	case PCH_DEVFN_UART0:	return "UAR0";
	case PCH_DEVFN_UART1:	return "UAR1";
	case PCH_DEVFN_GSPI0:	return "SPI0";
	case PCH_DEVFN_GSPI1:	return "SPI1";
	case PCH_DEVFN_EMMC:	return "EMMC";
	case PCH_DEVFN_SDIO:	return "SDIO";
	case PCH_DEVFN_SDCARD:	return "SDXC";
	case PCH_DEVFN_P2SB:	return "P2SB";
	case PCH_DEVFN_PMC:	return "PMC_";
	case PCH_DEVFN_HDA:	return "HDAS";
	case PCH_DEVFN_SMBUS:	return "SBUS";
	case PCH_DEVFN_SPI:	return "FSPI";
	case PCH_DEVFN_GBE:	return "IGBE";
	case PCH_DEVFN_TRACEHUB: return "THUB";
	}

	return NULL;
}
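
/*
 * The helpers below emit AML that calls the GPIO control methods published
 * by the SoC's GPIO ASL (GRXS/GTXS read the RX/TX state, STXS/CTXS set and
 * clear the TX state). For example, acpigen_soc_read_rx_gpio(10) generates
 * the equivalent of "Store (\_SB.PCI0.GRXS (10), Local0)".
 */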
static int acpigen_soc_gpio_op(const char *op, unsigned int gpio_num)
{
	/* op (gpio_num) */
	acpigen_emit_namestring(op);
	acpigen_write_integer(gpio_num);
	return 0;
}

static int acpigen_soc_get_gpio_state(const char *op, unsigned int gpio_num)
{
	/* Store (op (gpio_num), Local0) */
	acpigen_write_store();
	acpigen_soc_gpio_op(op, gpio_num);
	acpigen_emit_byte(LOCAL0_OP);
	return 0;
}

int acpigen_soc_read_rx_gpio(unsigned int gpio_num)
{
	return acpigen_soc_get_gpio_state("\\_SB.PCI0.GRXS", gpio_num);
}

int acpigen_soc_get_tx_gpio(unsigned int gpio_num)
{
	return acpigen_soc_get_gpio_state("\\_SB.PCI0.GTXS", gpio_num);
}

int acpigen_soc_set_tx_gpio(unsigned int gpio_num)
{
	return acpigen_soc_gpio_op("\\_SB.PCI0.STXS", gpio_num);
}

int acpigen_soc_clear_tx_gpio(unsigned int gpio_num)
{
	return acpigen_soc_gpio_op("\\_SB.PCI0.CTXS", gpio_num);
}