/* usr/src/uts/i86pc/os/microcode.c */
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
29 #include <sys/asm_linkage.h>
30 #include <sys/bootconf.h>
31 #include <sys/cpuvar.h>
32 #include <sys/cmn_err.h>
33 #include <sys/controlregs.h>
34 #include <sys/debug.h>
35 #include <sys/kobj.h>
36 #include <sys/kobj_impl.h>
37 #include <sys/machsystm.h>
38 #include <sys/ontrap.h>
39 #include <sys/param.h>
40 #include <sys/machparam.h>
41 #include <sys/promif.h>
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/types.h>
45 #include <sys/thread.h>
46 #include <sys/ucode.h>
47 #include <sys/x86_archext.h>
48 #include <sys/x_call.h>
49 #ifdef __xpv
50 #include <sys/hypervisor.h>
51 #endif
54 * AMD-specific equivalence table
56 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
59 * mcpu_ucode_info for the boot CPU. Statically allocated.
61 static struct cpu_ucode_info cpu_ucode_info0;
63 static ucode_file_t ucodefile;
65 static void* ucode_zalloc(processorid_t, size_t);
66 static void ucode_free(processorid_t, void *, size_t);
68 static int ucode_capable_amd(cpu_t *);
69 static int ucode_capable_intel(cpu_t *);
71 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
72 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
73 int);
75 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
76 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
78 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
79 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
81 #ifdef __xpv
82 static void ucode_load_xpv(ucode_update_t *);
83 static void ucode_chipset_amd(uint8_t *, int);
84 #endif
86 static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);
88 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
89 ucode_file_t *);
90 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
91 ucode_file_t *);
93 #ifndef __xpv
94 static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
95 ucode_file_amd_t *, int);
96 #endif
97 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
98 ucode_header_intel_t *, ucode_ext_table_intel_t *);
100 static void ucode_read_rev_amd(cpu_ucode_info_t *);
101 static void ucode_read_rev_intel(cpu_ucode_info_t *);
103 static const struct ucode_ops ucode_amd = {
104 MSR_AMD_PATCHLOADER,
105 ucode_capable_amd,
106 ucode_file_reset_amd,
107 ucode_read_rev_amd,
108 ucode_load_amd,
109 ucode_validate_amd,
110 ucode_extract_amd,
111 ucode_locate_amd
114 static const struct ucode_ops ucode_intel = {
115 MSR_INTC_UCODE_WRITE,
116 ucode_capable_intel,
117 ucode_file_reset_intel,
118 ucode_read_rev_intel,
119 ucode_load_intel,
120 ucode_validate_intel,
121 ucode_extract_intel,
122 ucode_locate_intel
125 const struct ucode_ops *ucode;
127 static const char ucode_failure_fmt[] =
128 "cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
129 static const char ucode_success_fmt[] =
130 "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
133 * Force flag. If set, the first microcode binary that matches
134 * signature and platform id will be used for microcode update,
135 * regardless of version. Should only be used for debugging.
137 int ucode_force_update = 0;
140 * Allocate space for mcpu_ucode_info in the machcpu structure
141 * for all non-boot CPUs.
143 void
144 ucode_alloc_space(cpu_t *cp)
146 ASSERT(cp->cpu_id != 0);
147 ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
148 cp->cpu_m.mcpu_ucode_info =
149 kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
152 void
153 ucode_free_space(cpu_t *cp)
155 ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
156 ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
157 kmem_free(cp->cpu_m.mcpu_ucode_info,
158 sizeof (*cp->cpu_m.mcpu_ucode_info));
159 cp->cpu_m.mcpu_ucode_info = NULL;
163 * Called when we are done with microcode update on all processors to free up
164 * space allocated for the microcode file.
166 void
167 ucode_cleanup()
169 if (ucode == NULL)
170 return;
172 ucode->file_reset(&ucodefile, -1);
176 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
177 * allocated with BOP_ALLOC() and does not require a free.
179 static void*
180 ucode_zalloc(processorid_t id, size_t size)
182 if (id)
183 return (kmem_zalloc(size, KM_NOSLEEP));
185 /* BOP_ALLOC() failure results in panic */
186 return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
189 static void
190 ucode_free(processorid_t id, void* buf, size_t size)
192 if (id)
193 kmem_free(buf, size);
197 * Check whether or not a processor is capable of microcode operations
198 * Returns 1 if it is capable, 0 if not.
200 * At this point we only support microcode update for:
201 * - Intel processors family 6 and above, and
202 * - AMD processors family 0x10 and above.
204 * We also assume that we don't support a mix of Intel and
205 * AMD processors in the same box.
207 * An i86xpv guest domain or VM can't update the microcode.
210 #define XPVDOMU_OR_HVM \
211 ((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)
213 /*ARGSUSED*/
214 static int
215 ucode_capable_amd(cpu_t *cp)
217 int hwenv = get_hwenv();
219 if (XPVDOMU_OR_HVM)
220 return (0);
222 return (cpuid_getfamily(cp) >= 0x10);
225 static int
226 ucode_capable_intel(cpu_t *cp)
228 int hwenv = get_hwenv();
230 if (XPVDOMU_OR_HVM)
231 return (0);
233 return (cpuid_getfamily(cp) >= 6);
237 * Called when it is no longer necessary to keep the microcode around,
238 * or when the cached microcode doesn't match the CPU being processed.
240 static void
241 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
243 ucode_file_amd_t *ucodefp = ufp->amd;
245 if (ucodefp == NULL)
246 return;
248 ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
249 ufp->amd = NULL;
252 static void
253 ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
255 ucode_file_intel_t *ucodefp = &ufp->intel;
256 int total_size, body_size;
258 if (ucodefp == NULL || ucodefp->uf_header == NULL)
259 return;
261 total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
262 body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
263 if (ucodefp->uf_body) {
264 ucode_free(id, ucodefp->uf_body, body_size);
265 ucodefp->uf_body = NULL;
268 if (ucodefp->uf_ext_table) {
269 int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;
271 ucode_free(id, ucodefp->uf_ext_table, size);
272 ucodefp->uf_ext_table = NULL;
275 ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
276 ucodefp->uf_header = NULL;
280 * Find the equivalent CPU id in the equivalence table.
282 static int
283 ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
285 char name[MAXPATHLEN];
286 intptr_t fd;
287 int count;
288 int offset = 0, cpi_sig = cpuid_getsig(cp);
289 ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;
291 (void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
292 UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));
295 * No kmem_zalloc() etc. available on boot cpu.
297 if (cp->cpu_id == 0) {
298 if ((fd = kobj_open(name)) == -1)
299 return (EM_OPENFILE);
300 /* ucode_zalloc() cannot fail on boot cpu */
301 eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
302 ASSERT(eqtbl);
303 do {
304 count = kobj_read(fd, (int8_t *)eqtbl,
305 sizeof (*eqtbl), offset);
306 if (count != sizeof (*eqtbl)) {
307 (void) kobj_close(fd);
308 return (EM_HIGHERREV);
310 offset += count;
311 } while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
312 (void) kobj_close(fd);
316 * If not already done, load the equivalence table.
317 * Not done on boot CPU.
319 if (eqtbl == NULL) {
320 struct _buf *eq;
321 uint64_t size;
323 if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
324 return (EM_OPENFILE);
326 if (kobj_get_filesize(eq, &size) < 0) {
327 kobj_close_file(eq);
328 return (EM_OPENFILE);
331 ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
332 if (ucode_eqtbl_amd == NULL) {
333 kobj_close_file(eq);
334 return (EM_NOMEM);
337 count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
338 kobj_close_file(eq);
340 if (count != size)
341 return (EM_FILESIZE);
344 /* Get the equivalent CPU id. */
345 if (cp->cpu_id)
346 for (eqtbl = ucode_eqtbl_amd;
347 eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
348 eqtbl++)
351 *eq_sig = eqtbl->ue_equiv_cpu;
353 /* No equivalent CPU id found, assume outdated microcode file. */
354 if (*eq_sig == 0)
355 return (EM_HIGHERREV);
357 return (EM_OK);
/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value (0xffff).
 */
#ifdef	__xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip magic + equivalence table to reach the first patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	while (size >= sizeof (ucode_header_amd_t) + 8) {
		/* each record: type word, length word, then the patch */
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
399 * Populate the ucode file structure from microcode file corresponding to
400 * this CPU, if exists.
402 * Return EM_OK on success, corresponding error code on failure.
404 /*ARGSUSED*/
405 static ucode_errno_t
406 ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
408 char name[MAXPATHLEN];
409 intptr_t fd;
410 int count, rc;
411 ucode_file_amd_t *ucodefp = ufp->amd;
413 #ifndef __xpv
414 uint16_t eq_sig = 0;
415 int i;
417 /* get equivalent CPU id */
418 if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
419 return (rc);
422 * Allocate a buffer for the microcode patch. If the buffer has been
423 * allocated before, check for a matching microcode to avoid loading
424 * the file again.
426 if (ucodefp == NULL)
427 ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
428 else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
429 == EM_OK)
430 return (EM_OK);
432 if (ucodefp == NULL)
433 return (EM_NOMEM);
435 ufp->amd = ucodefp;
438 * Find the patch for this CPU. The patch files are named XXXX-YY, where
439 * XXXX is the equivalent CPU id and YY is the running patch number.
440 * Patches specific to certain chipsets are guaranteed to have lower
441 * numbers than less specific patches, so we can just load the first
442 * patch that matches.
445 for (i = 0; i < 0xff; i++) {
446 (void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
447 UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
448 if ((fd = kobj_open(name)) == -1)
449 return (EM_NOMATCH);
450 count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
451 (void) kobj_close(fd);
453 if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
454 return (EM_OK);
456 return (EM_NOMATCH);
457 #else
458 int size = 0;
459 char c;
462 * The xVM case is special. To support mixed-revision systems, the
463 * hypervisor will choose which patch to load for which CPU, so the
464 * whole microcode patch container file will have to be loaded.
466 * Since this code is only run on the boot cpu, we don't have to care
467 * about failing ucode_zalloc() or freeing allocated memory.
469 if (cp->cpu_id != 0)
470 return (EM_INVALIDARG);
472 (void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
473 UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));
475 if ((fd = kobj_open(name)) == -1)
476 return (EM_OPENFILE);
478 /* get the file size by counting bytes */
479 do {
480 count = kobj_read(fd, &c, 1, size);
481 size += count;
482 } while (count);
484 ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
485 ASSERT(ucodefp);
486 ufp->amd = ucodefp;
488 ucodefp->usize = size;
489 ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
490 ASSERT(ucodefp->ucodep);
492 /* load the microcode patch container file */
493 count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
494 (void) kobj_close(fd);
496 if (count != size)
497 return (EM_FILESIZE);
499 /* make sure the container file is valid */
500 rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);
502 if (rc != EM_OK)
503 return (rc);
505 /* disable chipset-specific patches */
506 ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);
508 return (EM_OK);
509 #endif
512 static ucode_errno_t
513 ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
515 char name[MAXPATHLEN];
516 intptr_t fd;
517 int count;
518 int header_size = UCODE_HEADER_SIZE_INTEL;
519 int cpi_sig = cpuid_getsig(cp);
520 ucode_errno_t rc = EM_OK;
521 ucode_file_intel_t *ucodefp = &ufp->intel;
523 ASSERT(ucode);
526 * If the microcode matches the CPU we are processing, use it.
528 if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
529 ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
530 return (EM_OK);
534 * Look for microcode file with the right name.
536 (void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
537 UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
538 uinfop->cui_platid);
539 if ((fd = kobj_open(name)) == -1) {
540 return (EM_OPENFILE);
544 * We found a microcode file for the CPU we are processing,
545 * reset the microcode data structure and read in the new
546 * file.
548 ucode->file_reset(ufp, cp->cpu_id);
550 ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
551 if (ucodefp->uf_header == NULL)
552 return (EM_NOMEM);
554 count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);
556 switch (count) {
557 case UCODE_HEADER_SIZE_INTEL: {
559 ucode_header_intel_t *uhp = ucodefp->uf_header;
560 uint32_t offset = header_size;
561 int total_size, body_size, ext_size;
562 uint32_t sum = 0;
565 * Make sure that the header contains valid fields.
567 if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
568 total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
569 body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
570 ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
571 if (ucodefp->uf_body == NULL) {
572 rc = EM_NOMEM;
573 break;
576 if (kobj_read(fd, (char *)ucodefp->uf_body,
577 body_size, offset) != body_size)
578 rc = EM_FILESIZE;
581 if (rc)
582 break;
584 sum = ucode_checksum_intel(0, header_size,
585 (uint8_t *)ucodefp->uf_header);
586 if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
587 rc = EM_CHECKSUM;
588 break;
592 * Check to see if there is extended signature table.
594 offset = body_size + header_size;
595 ext_size = total_size - offset;
597 if (ext_size <= 0)
598 break;
600 ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
601 if (ucodefp->uf_ext_table == NULL) {
602 rc = EM_NOMEM;
603 break;
606 if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
607 ext_size, offset) != ext_size) {
608 rc = EM_FILESIZE;
609 } else if (ucode_checksum_intel(0, ext_size,
610 (uint8_t *)(ucodefp->uf_ext_table))) {
611 rc = EM_CHECKSUM;
612 } else {
613 int i;
615 ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
616 for (i = 0; i < ucodefp->uf_ext_table->uet_count;
617 i++) {
618 if (ucode_checksum_intel(0,
619 UCODE_EXT_SIG_SIZE_INTEL,
620 (uint8_t *)(&(ucodefp->uf_ext_table->
621 uet_ext_sig[i])))) {
622 rc = EM_CHECKSUM;
623 break;
627 break;
630 default:
631 rc = EM_FILESIZE;
632 break;
635 kobj_close(fd);
637 if (rc != EM_OK)
638 return (rc);
640 rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
641 ucodefp->uf_ext_table);
643 return (rc);
646 #ifndef __xpv
647 static ucode_errno_t
648 ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
649 ucode_file_amd_t *ucodefp, int size)
651 ucode_header_amd_t *uh;
653 if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
654 return (EM_NOMATCH);
656 uh = &ucodefp->uf_header;
659 * Don't even think about loading patches that would require code
660 * execution. Does not apply to patches for family 0x14 and beyond.
662 if (uh->uh_cpu_rev < 0x5000 &&
663 size > offsetof(ucode_file_amd_t, uf_code_present) &&
664 ucodefp->uf_code_present)
665 return (EM_NOMATCH);
667 if (eq_sig != uh->uh_cpu_rev)
668 return (EM_NOMATCH);
670 if (uh->uh_nb_id) {
671 cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
672 "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
673 return (EM_NOMATCH);
676 if (uh->uh_sb_id) {
677 cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
678 "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
679 return (EM_NOMATCH);
682 if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
683 return (EM_HIGHERREV);
685 return (EM_OK);
687 #endif
690 * Returns 1 if the microcode is for this processor; 0 otherwise.
692 static ucode_errno_t
693 ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
694 ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
696 if (uhp == NULL)
697 return (EM_NOMATCH);
699 if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
700 uinfop->cui_platid, uhp->uh_proc_flags)) {
702 if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
703 return (EM_HIGHERREV);
705 return (EM_OK);
708 if (uetp != NULL) {
709 int i;
711 for (i = 0; i < uetp->uet_count; i++) {
712 ucode_ext_sig_intel_t *uesp;
714 uesp = &uetp->uet_ext_sig[i];
716 if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
717 uinfop->cui_platid, uesp->ues_proc_flags)) {
719 if (uinfop->cui_rev >= uhp->uh_rev &&
720 !ucode_force_update)
721 return (EM_HIGHERREV);
723 return (EM_OK);
728 return (EM_NOMATCH);
731 /*ARGSUSED*/
732 static int
733 ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
735 ucode_update_t *uusp = (ucode_update_t *)arg1;
736 cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
737 #ifndef __xpv
738 on_trap_data_t otd;
739 #endif
741 ASSERT(ucode);
742 ASSERT(uusp->ucodep);
744 #ifndef __xpv
746 * Check one more time to see if it is really necessary to update
747 * microcode just in case this is a hyperthreaded processor where
748 * the threads share the same microcode.
750 if (!ucode_force_update) {
751 ucode->read_rev(uinfop);
752 uusp->new_rev = uinfop->cui_rev;
753 if (uinfop->cui_rev >= uusp->expected_rev)
754 return (0);
757 if (!on_trap(&otd, OT_DATA_ACCESS))
758 wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
760 no_trap();
761 #endif
762 ucode->read_rev(uinfop);
763 uusp->new_rev = uinfop->cui_rev;
765 return (0);
768 /*ARGSUSED*/
769 static uint32_t
770 ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
772 ucode_file_amd_t *ucodefp = ufp->amd;
773 #ifdef __xpv
774 ucode_update_t uus;
775 #else
776 on_trap_data_t otd;
777 #endif
779 ASSERT(ucode);
780 ASSERT(ucodefp);
782 #ifndef __xpv
783 kpreempt_disable();
784 if (on_trap(&otd, OT_DATA_ACCESS)) {
785 no_trap();
786 kpreempt_enable();
787 return (0);
789 wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
790 no_trap();
791 ucode->read_rev(uinfop);
792 kpreempt_enable();
794 return (ucodefp->uf_header.uh_patch_id);
795 #else
796 uus.ucodep = ucodefp->ucodep;
797 uus.usize = ucodefp->usize;
798 ucode_load_xpv(&uus);
799 ucode->read_rev(uinfop);
800 uus.new_rev = uinfop->cui_rev;
802 return (uus.new_rev);
803 #endif
806 /*ARGSUSED2*/
807 static uint32_t
808 ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
810 ucode_file_intel_t *ucodefp = &ufp->intel;
811 #ifdef __xpv
812 uint32_t ext_offset;
813 uint32_t body_size;
814 uint32_t ext_size;
815 uint8_t *ustart;
816 uint32_t usize;
817 ucode_update_t uus;
818 #endif
820 ASSERT(ucode);
822 #ifdef __xpv
824 * the hypervisor wants the header, data, and extended
825 * signature tables. We can only get here from the boot
826 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
827 * use BOP_ALLOC().
829 usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
830 ustart = ucode_zalloc(cp->cpu_id, usize);
831 ASSERT(ustart);
833 body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
834 ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
835 ext_size = usize - ext_offset;
836 ASSERT(ext_size >= 0);
838 (void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
839 (void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
840 body_size);
841 if (ext_size > 0) {
842 (void) memcpy(&ustart[ext_offset],
843 ucodefp->uf_ext_table, ext_size);
845 uus.ucodep = ustart;
846 uus.usize = usize;
847 ucode_load_xpv(&uus);
848 ucode->read_rev(uinfop);
849 uus.new_rev = uinfop->cui_rev;
850 #else
851 kpreempt_disable();
852 wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
853 ucode->read_rev(uinfop);
854 kpreempt_enable();
855 #endif
857 return (ucodefp->uf_header->uh_rev);
#ifdef	__xpv
/*
 * Hand a microcode buffer to the Xen hypervisor, which performs the
 * actual update on every physical CPU.  Only legal in the control
 * domain.
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif	/* __xpv */
884 static void
885 ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
887 uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
890 static void
891 ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
893 struct cpuid_regs crs;
896 * The Intel 64 and IA-32 Architecture Software Developer's Manual
897 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
898 * execute cpuid to guarantee the correct reading of this register.
900 wrmsr(MSR_INTC_UCODE_REV, 0);
901 (void) __cpuid_insn(&crs);
902 uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
905 static ucode_errno_t
906 ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
908 #ifndef __xpv
909 uint32_t *ptr = (uint32_t *)ucodep;
910 ucode_eqtbl_amd_t *eqtbl;
911 ucode_file_amd_t *ufp;
912 int count;
913 int higher = 0;
914 ucode_errno_t rc = EM_NOMATCH;
915 uint16_t eq_sig;
917 /* skip over magic number & equivalence table header */
918 ptr += 2; size -= 8;
920 count = *ptr++; size -= 4;
921 for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
922 eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
923 eqtbl++)
926 eq_sig = eqtbl->ue_equiv_cpu;
928 /* No equivalent CPU id found, assume outdated microcode file. */
929 if (eq_sig == 0)
930 return (EM_HIGHERREV);
932 /* Use the first microcode patch that matches. */
933 do {
934 ptr += count >> 2; size -= count;
936 if (!size)
937 return (higher ? EM_HIGHERREV : EM_NOMATCH);
939 ptr++; size -= 4;
940 count = *ptr++; size -= 4;
941 ufp = (ucode_file_amd_t *)ptr;
943 rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
944 if (rc == EM_HIGHERREV)
945 higher = 1;
946 } while (rc != EM_OK);
948 uusp->ucodep = (uint8_t *)ufp;
949 uusp->usize = count;
950 uusp->expected_rev = ufp->uf_header.uh_patch_id;
951 #else
953 * The hypervisor will choose the patch to load, so there is no way to
954 * know the "expected revision" in advance. This is especially true on
955 * mixed-revision systems where more than one patch will be loaded.
957 uusp->expected_rev = 0;
958 uusp->ucodep = ucodep;
959 uusp->usize = size;
961 ucode_chipset_amd(ucodep, size);
962 #endif
964 return (EM_OK);
967 static ucode_errno_t
968 ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
970 uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
971 int remaining;
972 int found = 0;
973 ucode_errno_t search_rc = EM_NOMATCH; /* search result */
976 * Go through the whole buffer in case there are
977 * multiple versions of matching microcode for this
978 * processor.
980 for (remaining = size; remaining > 0; ) {
981 int total_size, body_size, ext_size;
982 uint8_t *curbuf = &ucodep[size - remaining];
983 ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
984 ucode_ext_table_intel_t *uetp = NULL;
985 ucode_errno_t tmprc;
987 total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
988 body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
989 ext_size = total_size - (header_size + body_size);
991 if (ext_size > 0)
992 uetp = (ucode_ext_table_intel_t *)
993 &curbuf[header_size + body_size];
995 tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);
998 * Since we are searching through a big file
999 * containing microcode for pretty much all the
1000 * processors, we are bound to get EM_NOMATCH
1001 * at one point. However, if we return
1002 * EM_NOMATCH to users, it will really confuse
1003 * them. Therefore, if we ever find a match of
1004 * a lower rev, we will set return code to
1005 * EM_HIGHERREV.
1007 if (tmprc == EM_HIGHERREV)
1008 search_rc = EM_HIGHERREV;
1010 if (tmprc == EM_OK &&
1011 uusp->expected_rev < uhp->uh_rev) {
1012 #ifndef __xpv
1013 uusp->ucodep = (uint8_t *)&curbuf[header_size];
1014 #else
1015 uusp->ucodep = (uint8_t *)curbuf;
1016 #endif
1017 uusp->usize =
1018 UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
1019 uusp->expected_rev = uhp->uh_rev;
1020 found = 1;
1023 remaining -= total_size;
1026 if (!found)
1027 return (search_rc);
1029 return (EM_OK);
1032 * Entry point to microcode update from the ucode_drv driver.
1034 * Returns EM_OK on success, corresponding error code on failure.
1036 ucode_errno_t
1037 ucode_update(uint8_t *ucodep, int size)
1039 int found = 0;
1040 processorid_t id;
1041 ucode_update_t cached = { 0 };
1042 ucode_update_t *cachedp = NULL;
1043 ucode_errno_t rc = EM_OK;
1044 ucode_errno_t search_rc = EM_NOMATCH; /* search result */
1045 cpuset_t cpuset;
1047 ASSERT(ucode);
1048 ASSERT(ucodep);
1049 CPUSET_ZERO(cpuset);
1051 if (!ucode->capable(CPU))
1052 return (EM_NOTSUP);
1054 mutex_enter(&cpu_lock);
1056 for (id = 0; id < max_ncpus; id++) {
1057 cpu_t *cpu;
1058 ucode_update_t uus = { 0 };
1059 ucode_update_t *uusp = &uus;
1062 * If there is no such CPU or it is not xcall ready, skip it.
1064 if ((cpu = cpu_get(id)) == NULL ||
1065 !(cpu->cpu_flags & CPU_READY))
1066 continue;
1068 uusp->sig = cpuid_getsig(cpu);
1069 bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
1070 sizeof (uusp->info));
1073 * If the current CPU has the same signature and platform
1074 * id as the previous one we processed, reuse the information.
1076 if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
1077 cachedp->info.cui_platid == uusp->info.cui_platid) {
1078 uusp->ucodep = cachedp->ucodep;
1079 uusp->expected_rev = cachedp->expected_rev;
1081 * Intuitively we should check here to see whether the
1082 * running microcode rev is >= the expected rev, and
1083 * quit if it is. But we choose to proceed with the
1084 * xcall regardless of the running version so that
1085 * the other threads in an HT processor can update
1086 * the cpu_ucode_info structure in machcpu.
1088 } else if ((search_rc = ucode->extract(uusp, ucodep, size))
1089 == EM_OK) {
1090 bcopy(uusp, &cached, sizeof (cached));
1091 cachedp = &cached;
1092 found = 1;
1095 /* Nothing to do */
1096 if (uusp->ucodep == NULL)
1097 continue;
1099 #ifdef __xpv
1101 * for i86xpv, the hypervisor will update all the CPUs.
1102 * the hypervisor wants the header, data, and extended
1103 * signature tables. ucode_write will just read in the
1104 * updated version on all the CPUs after the update has
1105 * completed.
1107 if (id == 0) {
1108 ucode_load_xpv(uusp);
1110 #endif
1112 CPUSET_ADD(cpuset, id);
1113 kpreempt_disable();
1114 xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
1115 kpreempt_enable();
1116 CPUSET_DEL(cpuset, id);
1118 if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
1119 !ucode_force_update) {
1120 rc = EM_HIGHERREV;
1121 } else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
1122 uusp->expected_rev != uusp->new_rev)) {
1123 cmn_err(CE_WARN, ucode_failure_fmt,
1124 id, uusp->info.cui_rev, uusp->expected_rev);
1125 rc = EM_UPDATE;
1126 } else {
1127 cmn_err(CE_CONT, ucode_success_fmt,
1128 id, uusp->info.cui_rev, uusp->new_rev);
1132 mutex_exit(&cpu_lock);
1134 if (!found)
1135 rc = search_rc;
1137 return (rc);
1141 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
1142 * This is the entry point from boot path where pointer to CPU structure
1143 * is available.
1145 * cpuid_info must be initialized before ucode_check can be called.
1147 void
1148 ucode_check(cpu_t *cp)
1150 cpu_ucode_info_t *uinfop;
1151 ucode_errno_t rc = EM_OK;
1152 uint32_t new_rev = 0;
1154 ASSERT(cp);
1156 * Space statically allocated for BSP, ensure pointer is set
1158 if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
1159 cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;
1161 uinfop = cp->cpu_m.mcpu_ucode_info;
1162 ASSERT(uinfop);
1164 /* set up function pointers if not already done */
1165 if (!ucode)
1166 switch (cpuid_getvendor(cp)) {
1167 case X86_VENDOR_AMD:
1168 ucode = &ucode_amd;
1169 break;
1170 case X86_VENDOR_Intel:
1171 ucode = &ucode_intel;
1172 break;
1173 default:
1174 ucode = NULL;
1175 return;
1178 if (!ucode->capable(cp))
1179 return;
1182 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
1183 * (Family 6, model 5 and above) and all processors after.
1185 if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
1186 ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
1187 uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
1188 INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
1191 ucode->read_rev(uinfop);
1193 #ifdef __xpv
1195 * for i86xpv, the hypervisor will update all the CPUs. We only need
1196 * do do this on one of the CPUs (and there always is a CPU 0).
1198 if (cp->cpu_id != 0) {
1199 return;
1201 #endif
1204 * Check to see if we need ucode update
1206 if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
1207 new_rev = ucode->load(&ucodefile, uinfop, cp);
1209 if (uinfop->cui_rev != new_rev)
1210 cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
1211 uinfop->cui_rev, new_rev);
1215 * If we fail to find a match for any reason, free the file structure
1216 * just in case we have read in a partial file.
1218 * Since the scratch memory for holding the microcode for the boot CPU
1219 * came from BOP_ALLOC, we will reset the data structure as if we
1220 * never did the allocation so we don't have to keep track of this
1221 * special chunk of memory. We free the memory used for the rest
1222 * of the CPUs in start_other_cpus().
1224 if (rc != EM_OK || cp->cpu_id == 0)
1225 ucode->file_reset(&ucodefile, cp->cpu_id);
1229 * Returns microcode revision from the machcpu structure.
1231 ucode_errno_t
1232 ucode_get_rev(uint32_t *revp)
1234 int i;
1236 ASSERT(ucode);
1237 ASSERT(revp);
1239 if (!ucode->capable(CPU))
1240 return (EM_NOTSUP);
1242 mutex_enter(&cpu_lock);
1243 for (i = 0; i < max_ncpus; i++) {
1244 cpu_t *cpu;
1246 if ((cpu = cpu_get(i)) == NULL)
1247 continue;
1249 revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1251 mutex_exit(&cpu_lock);
1253 return (EM_OK);