/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * southbridge. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */
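
/*
 * Overall flow (see intel_me_init() below):
 *  1. Determine the BIOS path from the ME host firmware status registers.
 *  2. Map the MEI MMIO interface and read the ME BIOS Payload (MBP).
 *  3. Optionally ask the ME to gate ICC clocks per devicetree settings.
 *  4. Send HMRFPO LOCK and END OF POST, with or without waiting for an ACK
 *     depending on whether the MBP-cleared wait was skipped.
 */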

#include <arch/acpi.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <stdlib.h>
#include <string.h>
#include <delay.h>
#include <elog.h>
#include <soc/me.h>
#include <soc/lpc.h>
#include <soc/pch.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/rcba.h>
#include <soc/intel/broadwell/chip.h>

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/chromeos.h>
#include <vendorcode/google/chromeos/gnvs.h>
#endif

/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH]		= "Normal",
	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
	[ME_ERROR_BIOS_PATH]		= "Error",
	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
	[ME_DISABLE_BIOS_PATH]		= "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
};

/* MMIO base address for MEI interface */
static u8 *mei_base_address;

static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	if (!CONFIG(DEBUG_INTEL_ME))
		return;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", offset);
		break;
	}
}

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + offset, dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

static inline void pci_read_dword_ptr(struct device *dev, void *ptr, int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + MEI_H_CB_WW, dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + MEI_ME_CB_RW);
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}
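
/*
 * Message transfer uses the MEI circular buffers: the host writes the MEI
 * header and payload dwords to MEI_H_CB_WW and then sets interrupt_generate
 * in its CSR to notify the ME. Responses are drained one dword at a time
 * from MEI_ME_CB_RW once the ME CSR shows enough data between its read and
 * write pointers.
 */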

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned int try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

static int mei_send_packet(struct mei_header *mei, void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request has no data\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/* Ensure the requested length will fit in the circular buffer. */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}
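
/*
 * Split a request into one or more MEI packets sized to the space left in
 * the circular buffer; only the final fragment has is_complete set.
 */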

static int mei_send_data(u8 me_address, u8 host_address,
			 void *req_data, int req_bytes)
{
	struct mei_header header = {
		.client_address = me_address,
		.host_address = host_address,
	};
	struct mei_csr host;
	int current = 0;
	u8 *req_ptr = req_data;

	while (!header.is_complete) {
		int remain = req_bytes - current;
		int buf_len;

		read_host_csr(&host);
		buf_len = host.buffer_depth - host.buffer_write_ptr;

		if (buf_len > remain) {
			/* Send all remaining data as final message */
			header.length = req_bytes - current;
			header.is_complete = 1;
		} else {
			/* Send as much data as the buffer can hold */
			header.length = buf_len;
		}

		mei_send_packet(&header, req_ptr);

		req_ptr += header.length;
		current += header.length;
	}

	return 0;
}

static int mei_send_header(u8 me_address, u8 host_address,
			   void *header, int header_len, int complete)
{
	struct mei_header mei = {
		.client_address = me_address,
		.host_address = host_address,
		.length = header_len,
		.is_complete = complete,
	};
	return mei_send_packet(&mei, header);
}

static int mei_recv_msg(void *header, int header_bytes,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + header_bytes) >> 2;
	if (rsp_bytes & 3)
		expected++;

	if (mei_wait_for_me_ready() < 0)
		return -1;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least the header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data %d != %d\n",
		       ndata, (expected - 1));
		return -1;
	}

	/* Read response header from the ME */
	data = header;
	for (n = 0; n < (header_bytes >> 2); ++n)
		*data++ = read_cb();
	ndata -= header_bytes >> 2;

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}
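
/*
 * Two fixed MEI clients are used below: the MKHI client for the END OF
 * POST, HMRFPO LOCK and firmware capability messages, and the ICC
 * (integrated clock control) client for clock enable configuration.
 */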

static inline int mei_sendrecv_mkhi(struct mkhi_header *mkhi,
				    void *req_data, int req_bytes,
				    void *rsp_data, int rsp_bytes)
{
	struct mkhi_header mkhi_rsp;

	/* Send header */
	if (mei_send_header(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
			    mkhi, sizeof(*mkhi), req_bytes ? 0 : 1) < 0)
		return -1;

	/* Send data if available */
	if (req_bytes && mei_send_data(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
				       req_data, req_bytes) < 0)
		return -1;

	/* Return now if no response expected */
	if (!rsp_bytes)
		return 0;

	/* Read header and data */
	if (mei_recv_msg(&mkhi_rsp, sizeof(mkhi_rsp),
			 rsp_data, rsp_bytes) < 0)
		return -1;

	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}

	return 0;
}

static inline int mei_sendrecv_icc(struct icc_header *icc,
				   void *req_data, int req_bytes,
				   void *rsp_data, int rsp_bytes)
{
	struct icc_header icc_rsp;

	/* Send header */
	if (mei_send_header(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
			    icc, sizeof(*icc), req_bytes ? 0 : 1) < 0)
		return -1;

	/* Send data if available */
	if (req_bytes && mei_send_data(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
				       req_data, req_bytes) < 0)
		return -1;

	/* Read header and data, if needed */
	if (rsp_bytes && mei_recv_msg(&icc_rsp, sizeof(icc_rsp),
				      rsp_data, rsp_bytes) < 0)
		return -1;

	return 0;
}

/*
 * mbp give up routine. This path is taken if hfs.mbp_rdy is 0 or the read
 * state machine on the BIOS end doesn't match the ME's state machine.
 */
static void intel_me_mbp_give_up(struct device *dev)
{
	struct mei_csr csr;

	pci_write_config32(dev, PCI_ME_H_GS2, PCI_ME_MBP_GIVE_UP);

	read_host_csr(&csr);
	csr.reset = 1;
	csr.interrupt_generate = 1;
	write_host_csr(&csr);
}

/*
 * mbp clear routine. This will wait for the ME to indicate that
 * the MBP has been read and cleared.
 */
static void intel_me_mbp_clear(struct device *dev)
{
	int count;
	struct me_hfs2 hfs2;

	/* Wait for the mbp_cleared indicator */
	for (count = ME_RETRY; count > 0; --count) {
		pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);
		if (hfs2.mbp_cleared)
			break;
		udelay(ME_DELAY);
	}

	if (count == 0) {
		printk(BIOS_WARNING, "ME: Timeout waiting for mbp_cleared\n");
		intel_me_mbp_give_up(dev);
	} else {
		printk(BIOS_INFO, "ME: MBP cleared\n");
	}
}

static void me_print_fw_version(mbp_fw_version_name *vers_name)
{
	if (!vers_name) {
		printk(BIOS_ERR, "ME: mbp missing version report\n");
		return;
	}

	printk(BIOS_DEBUG, "ME: found version %d.%d.%d.%d\n",
	       vers_name->major_version, vers_name->minor_version,
	       vers_name->hotfix_version, vers_name->build_version);
}

static inline void print_cap(const char *name, int state)
{
	printk(BIOS_DEBUG, "ME Capability: %-41s : %sabled\n",
	       name, state ? " en" : "dis");
}

/* Get ME Firmware Capabilities */
static int mkhi_get_fwcaps(mbp_mefwcaps *cap)
{
	u32 rule_id = 0;
	struct me_fwcaps cap_msg;
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_FWCAPS,
		.command = MKHI_FWCAPS_GET_RULE,
	};

	/* Send request and wait for response */
	if (mei_sendrecv_mkhi(&mkhi, &rule_id, sizeof(u32),
			      &cap_msg, sizeof(cap_msg)) < 0) {
		printk(BIOS_ERR, "ME: GET FWCAPS message failed\n");
		return -1;
	}
	*cap = cap_msg.caps_sku;
	return 0;
}

/* Print ME Firmware Capabilities */
static void me_print_fwcaps(mbp_mefwcaps *cap)
{
	mbp_mefwcaps local_caps;

	if (!cap) {
		cap = &local_caps;
		printk(BIOS_ERR, "ME: mbp missing fwcaps report\n");
		if (mkhi_get_fwcaps(cap))
			return;
	}

	print_cap("Full Network manageability", cap->full_net);
	print_cap("Regular Network manageability", cap->std_net);
	print_cap("Manageability", cap->manageability);
	print_cap("IntelR Anti-Theft (AT)", cap->intel_at);
	print_cap("IntelR Capability Licensing Service (CLS)", cap->intel_cls);
	print_cap("IntelR Power Sharing Technology (MPC)", cap->intel_mpc);
	print_cap("ICC Over Clocking", cap->icc_over_clocking);
	print_cap("Protected Audio Video Path (PAVP)", cap->pavp);
	print_cap("IPV6", cap->ipv6);
	print_cap("KVM Remote Control (KVM)", cap->kvm);
	print_cap("Outbreak Containment Heuristic (OCH)", cap->och);
	print_cap("Virtual LAN (VLAN)", cap->vlan);
	print_cap("TLS", cap->tls);
	print_cap("Wireless LAN (WLAN)", cap->wlan);
}

/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_END_OF_POST,
	};
	u32 eop_ack;

	/* Send request and wait for response */
	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, &eop_ack, sizeof(eop_ack)) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful (%d)\n", eop_ack);
	return 0;
}

/* Send END OF POST message to the ME, do not wait for response */
static int mkhi_end_of_post_noack(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_END_OF_POST_NOACK,
	};

	/* Send request, do not wait for response */
	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST NOACK message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST NOACK message successful\n");
	return 0;
}
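
/*
 * HMRFPO (Host ME Region Flash Protection Override) LOCK tells the ME to
 * keep the ME flash region closed to host writes until the next reset.
 */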

/* Send HMRFPO LOCK message to the ME */
static int mkhi_hmrfpo_lock(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_HMRFPO,
		.command = MKHI_HMRFPO_LOCK,
	};
	u32 ack;

	/* Send request and wait for response */
	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, &ack, sizeof(ack)) < 0) {
		printk(BIOS_ERR, "ME: HMRFPO LOCK message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: HMRFPO LOCK message successful (%d)\n", ack);
	return 0;
}

/* Send HMRFPO LOCK message to the ME, do not wait for response */
static int mkhi_hmrfpo_lock_noack(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_HMRFPO,
		.command = MKHI_HMRFPO_LOCK_NOACK,
	};

	/* Send request, do not wait for response */
	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: HMRFPO LOCK NOACK message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: HMRFPO LOCK NOACK message successful\n");
	return 0;
}

static void intel_me_finalize(struct device *dev)
{
	u32 reg32;

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u8 *)0xfffffff0)
		return;

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
	RCBA32(FD2);
}
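
/*
 * Ask the ME to gate the clocks selected by the devicetree: clock_enables
 * is 0, so every clock covered by clock_mask is turned off. A board would
 * request this from its devicetree with, for example (hypothetical value):
 *
 *	register "icc_clock_disable" = "0x0"
 */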

static int me_icc_set_clock_enables(u32 mask)
{
	struct icc_clock_enables_msg clk = {
		.clock_enables = 0,	/* Turn off specified clocks */
		.clock_mask = mask,
		.no_response = 1,	/* Do not expect response */
	};
	struct icc_header icc = {
		.api_version = ICC_API_VERSION_LYNXPOINT,
		.icc_command = ICC_SET_CLOCK_ENABLES,
		.length = sizeof(clk),
	};

	/* Send request, no response is expected */
	if (mei_sendrecv_icc(&icc, &clk, sizeof(clk), NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: ICC SET CLOCK ENABLES message failed\n");
		return -1;
	}
	printk(BIOS_INFO, "ME: ICC SET CLOCK ENABLES 0x%08x\n", mask);
	return 0;
}

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_hfs2 hfs2;

	/* Check and dump status */
	intel_me_status();

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware and MBP */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	/* Check if the MBP is ready */
	if (!hfs2.mbp_rdy) {
		printk(BIOS_CRIT, "%s: mbp is not ready!\n", __func__);
		path = ME_ERROR_BIOS_PATH;
	}

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = hfs2.progress_code,
			.current_pmevent = hfs2.current_pmevent,
			.current_state = hfs2.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = res2mmio(res, 0, 0);

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if CONFIG(CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

static void intel_me_print_mbp(me_bios_payload *mbp_data)
{
	me_print_fw_version(mbp_data->fw_version_name);

	if (CONFIG(DEBUG_INTEL_ME))
		me_print_fwcaps(mbp_data->fw_capabilities);

	if (mbp_data->plat_time) {
		printk(BIOS_DEBUG, "ME: Wake Event to ME Reset: %u ms\n",
		       mbp_data->plat_time->wake_event_mrst_time_ms);
		printk(BIOS_DEBUG, "ME: ME Reset to Platform Reset: %u ms\n",
		       mbp_data->plat_time->mrst_pltrst_time_ms);
		printk(BIOS_DEBUG, "ME: Platform Reset to CPU Reset: %u ms\n",
		       mbp_data->plat_time->pltrst_cpurst_time_ms);
	}
}
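
/*
 * Number of dwords the ME has queued for the host in its circular buffer.
 * The wrap-around mask assumes buffer_depth is a power of two.
 */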
static u32 me_to_host_words_pending(void)
{
	struct mei_csr me;

	read_me_csr(&me);
	if (!me.ready)
		return 0;
	return (me.buffer_write_ptr - me.buffer_read_ptr) &
		(me.buffer_depth - 1);
}
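
/*
 * MBP data as read from the ME: a header followed by a sequence of items,
 * each introduced by an mbp_item_header (see intel_me_read_mbp() below).
 */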
struct mbp_payload {
	mbp_header header;
	u32 data[0];
};

/*
 * Read and print ME MBP data
 *
 * Return -1 to indicate a problem (give up)
 * Return 0 to indicate success (send LOCK+EOP)
 * Return 1 to indicate success (send LOCK+EOP with NOACK)
 */
static int intel_me_read_mbp(me_bios_payload *mbp_data, struct device *dev)
{
	mbp_header mbp_hdr;
	u32 me2host_pending;
	struct mei_csr host;
	struct me_hfs2 hfs2;
	struct mbp_payload *mbp;
	int i;
	int ret = 0;

	pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);

	if (!hfs2.mbp_rdy) {
		printk(BIOS_ERR, "ME: MBP not ready\n");
		intel_me_mbp_give_up(dev);
		return -1;
	}

	me2host_pending = me_to_host_words_pending();
	if (!me2host_pending) {
		printk(BIOS_ERR, "ME: no mbp data!\n");
		intel_me_mbp_give_up(dev);
		return -1;
	}

	/* We know for sure that at least the header is there */
	mei_read_dword_ptr(&mbp_hdr, MEI_ME_CB_RW);

	if ((mbp_hdr.num_entries > (mbp_hdr.mbp_size / 2)) ||
	    (me2host_pending < mbp_hdr.mbp_size)) {
		printk(BIOS_ERR, "ME: mbp of %d entries, total size %d words"
		       " buffer contains %d words\n",
		       mbp_hdr.num_entries, mbp_hdr.mbp_size,
		       me2host_pending);
		intel_me_mbp_give_up(dev);
		return -1;
	}

	mbp = malloc(mbp_hdr.mbp_size * sizeof(u32));
	if (!mbp) {
		intel_me_mbp_give_up(dev);
		return -1;
	}

	mbp->header = mbp_hdr;
	me2host_pending--;

	i = 0;
	while (i != me2host_pending) {
		mei_read_dword_ptr(&mbp->data[i], MEI_ME_CB_RW);
		i++;
	}

	read_host_csr(&host);

	/* Check that read and write pointers are equal. */
	if (host.buffer_read_ptr != host.buffer_write_ptr) {
		printk(BIOS_INFO, "ME: MBP Read/Write pointer mismatch\n");
		printk(BIOS_INFO, "ME: MBP Waiting for MBP cleared flag\n");

		/* Tell ME that the host has finished reading the MBP. */
		host.interrupt_generate = 1;
		host.reset = 0;
		write_host_csr(&host);

		/* Wait for the mbp_cleared indicator. */
		intel_me_mbp_clear(dev);
	} else {
		/* Indicate NOACK messages should be used. */
		ret = 1;
	}

	/* Dump out the MBP contents. */
	if (CONFIG(DEBUG_INTEL_ME)) {
		printk(BIOS_INFO, "ME MBP: Header: items: %d, size dw: %d\n",
		       mbp->header.num_entries, mbp->header.mbp_size);
		for (i = 0; i < mbp->header.mbp_size - 1; i++)
			printk(BIOS_INFO, "ME MBP: %04x: 0x%08x\n", i, mbp->data[i]);
	}

#define ASSIGN_FIELD_PTR(field_, val_) \
	{ \
		mbp_data->field_ = (typeof(mbp_data->field_))(void *)val_; \
		break; \
	}

	/* Setup the pointers in the me_bios_payload structure. */
	for (i = 0; i < mbp->header.mbp_size - 1;) {
		mbp_item_header *item = (void *)&mbp->data[i];

		switch (MBP_MAKE_IDENT(item->app_id, item->item_id)) {
		case MBP_IDENT(KERNEL, FW_VER):
			ASSIGN_FIELD_PTR(fw_version_name, &mbp->data[i+1]);
		case MBP_IDENT(ICC, PROFILE):
			ASSIGN_FIELD_PTR(icc_profile, &mbp->data[i+1]);
		case MBP_IDENT(INTEL_AT, STATE):
			ASSIGN_FIELD_PTR(at_state, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, FW_CAP):
			ASSIGN_FIELD_PTR(fw_capabilities, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, ROM_BIST):
			ASSIGN_FIELD_PTR(rom_bist_data, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, PLAT_KEY):
			ASSIGN_FIELD_PTR(platform_key, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, FW_TYPE):
			ASSIGN_FIELD_PTR(fw_plat_type, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, MFS_FAILURE):
			ASSIGN_FIELD_PTR(mfsintegrity, &mbp->data[i+1]);
		case MBP_IDENT(KERNEL, PLAT_TIME):
			ASSIGN_FIELD_PTR(plat_time, &mbp->data[i+1]);
		case MBP_IDENT(NFC, SUPPORT_DATA):
			ASSIGN_FIELD_PTR(nfc_data, &mbp->data[i+1]);
		}
		i += item->length;
	}
#undef ASSIGN_FIELD_PTR

	free(mbp);
	return ret;
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	config_t *config = config_of(dev);
	me_bios_path path = intel_me_path(dev);
	me_bios_payload mbp_data;
	int mbp_ret;
	struct me_hfs hfs;
	struct mei_csr csr;

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	if (path == ME_NORMAL_BIOS_PATH) {
		/* Validate the extend register */
		intel_me_extend_valid(dev);
	}

	memset(&mbp_data, 0, sizeof(mbp_data));

	/*
	 * According to the ME9 BWG, BIOS is required to fetch MBP data in
	 * all boot flows except S3 Resume.
	 */

	/* Prepare MEI MMIO interface */
	if (intel_mei_setup(dev) < 0)
		return;

	/* Read ME MBP data */
	mbp_ret = intel_me_read_mbp(&mbp_data, dev);
	if (mbp_ret < 0)
		return;
	intel_me_print_mbp(&mbp_data);

	/* Set clock enables according to devicetree */
	if (config->icc_clock_disable)
		me_icc_set_clock_enables(config->icc_clock_disable);

	/* Make sure ME is in a mode that expects EOP */
	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	if (mbp_ret) {
		/*
		 * MBP Cleared wait is skipped,
		 * do not expect ACK and reset when complete.
		 */

		/* Send HMRFPO Lock command, no response */
		mkhi_hmrfpo_lock_noack();

		/* Send END OF POST command, no response */
		mkhi_end_of_post_noack();

		/* Assert reset and interrupt */
		read_host_csr(&csr);
		csr.interrupt_generate = 1;
		csr.reset = 1;
		write_host_csr(&csr);
	} else {
		/* MBP Cleared wait was not skipped */

		/* Send HMRFPO LOCK command */
		mkhi_hmrfpo_lock();

		/* Send EOP command so ME stops accepting other commands */
		mkhi_end_of_post();
	}
}
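
/*
 * On the S3 resume path the ME device is disabled and hidden up front so
 * that none of the MEI messaging above is attempted.
 */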
static void intel_me_enable(struct device *dev)
{
	/* Avoid talking to the device in S3 path */
	if (acpi_is_wakeup_s3()) {
		dev->enabled = 0;
		pch_disable_devfn(dev);
	}
}

static struct device_operations device_ops = {
	.read_resources		= &pci_dev_read_resources,
	.set_resources		= &pci_dev_set_resources,
	.enable_resources	= &pci_dev_enable_resources,
	.enable			= &intel_me_enable,
	.init			= &intel_me_init,
	.final			= &intel_me_finalize,
	.ops_pci		= &broadwell_pci_ops,
};

static const unsigned short pci_device_ids[] = {
	0x9c3a,		/* Low Power */
	0x9cba,		/* WildcatPoint */
	0
};

static const struct pci_driver intel_me __pci_driver = {
	.ops		= &device_ops,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.devices	= pci_device_ids,
};