9744 invalidate cache before microcode update
[unleashed.git] / usr / src / uts / i86pc / os / microcode.c
blobafc48953f5315accb6b3c2c7a406cbdbcf3460ff
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2018, Joyent, Inc.
30 #include <sys/asm_linkage.h>
31 #include <sys/bootconf.h>
32 #include <sys/cpuvar.h>
33 #include <sys/cmn_err.h>
34 #include <sys/controlregs.h>
35 #include <sys/debug.h>
36 #include <sys/kobj.h>
37 #include <sys/kobj_impl.h>
38 #include <sys/machsystm.h>
39 #include <sys/ontrap.h>
40 #include <sys/param.h>
41 #include <sys/machparam.h>
42 #include <sys/promif.h>
43 #include <sys/sysmacros.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
46 #include <sys/thread.h>
47 #include <sys/ucode.h>
48 #include <sys/x86_archext.h>
49 #include <sys/x_call.h>
50 #ifdef __xpv
51 #include <sys/hypervisor.h>
52 #endif
55 * AMD-specific equivalence table
57 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
60 * mcpu_ucode_info for the boot CPU. Statically allocated.
62 static struct cpu_ucode_info cpu_ucode_info0;
64 static ucode_file_t ucodefile;
66 static void* ucode_zalloc(processorid_t, size_t);
67 static void ucode_free(processorid_t, void *, size_t);
69 static int ucode_capable_amd(cpu_t *);
70 static int ucode_capable_intel(cpu_t *);
72 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
73 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
74 int);
76 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
77 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
79 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
80 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
82 #ifdef __xpv
83 static void ucode_load_xpv(ucode_update_t *);
84 static void ucode_chipset_amd(uint8_t *, int);
85 #endif
87 static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);
89 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
90 ucode_file_t *);
91 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
92 ucode_file_t *);
94 #ifndef __xpv
95 static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
96 ucode_file_amd_t *, int);
97 #endif
98 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
99 ucode_header_intel_t *, ucode_ext_table_intel_t *);
101 static void ucode_read_rev_amd(cpu_ucode_info_t *);
102 static void ucode_read_rev_intel(cpu_ucode_info_t *);
104 static const struct ucode_ops ucode_amd = {
105 MSR_AMD_PATCHLOADER,
106 ucode_capable_amd,
107 ucode_file_reset_amd,
108 ucode_read_rev_amd,
109 ucode_load_amd,
110 ucode_validate_amd,
111 ucode_extract_amd,
112 ucode_locate_amd
115 static const struct ucode_ops ucode_intel = {
116 MSR_INTC_UCODE_WRITE,
117 ucode_capable_intel,
118 ucode_file_reset_intel,
119 ucode_read_rev_intel,
120 ucode_load_intel,
121 ucode_validate_intel,
122 ucode_extract_intel,
123 ucode_locate_intel
126 const struct ucode_ops *ucode;
128 static const char ucode_failure_fmt[] =
129 "cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
130 static const char ucode_success_fmt[] =
131 "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
134 * Force flag. If set, the first microcode binary that matches
135 * signature and platform id will be used for microcode update,
136 * regardless of version. Should only be used for debugging.
138 int ucode_force_update = 0;
141 * Allocate space for mcpu_ucode_info in the machcpu structure
142 * for all non-boot CPUs.
144 void
145 ucode_alloc_space(cpu_t *cp)
147 ASSERT(cp->cpu_id != 0);
148 ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
149 cp->cpu_m.mcpu_ucode_info =
150 kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
153 void
154 ucode_free_space(cpu_t *cp)
156 ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
157 ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
158 kmem_free(cp->cpu_m.mcpu_ucode_info,
159 sizeof (*cp->cpu_m.mcpu_ucode_info));
160 cp->cpu_m.mcpu_ucode_info = NULL;
164 * Called when we are done with microcode update on all processors to free up
165 * space allocated for the microcode file.
167 void
168 ucode_cleanup()
170 if (ucode == NULL)
171 return;
173 ucode->file_reset(&ucodefile, -1);
177 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
178 * allocated with BOP_ALLOC() and does not require a free.
180 static void*
181 ucode_zalloc(processorid_t id, size_t size)
183 if (id)
184 return (kmem_zalloc(size, KM_NOSLEEP));
186 /* BOP_ALLOC() failure results in panic */
187 return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
190 static void
191 ucode_free(processorid_t id, void* buf, size_t size)
193 if (id)
194 kmem_free(buf, size);
198 * Check whether or not a processor is capable of microcode operations
199 * Returns 1 if it is capable, 0 if not.
201 * At this point we only support microcode update for:
202 * - Intel processors family 6 and above, and
203 * - AMD processors family 0x10 and above.
205 * We also assume that we don't support a mix of Intel and
206 * AMD processors in the same box.
208 * An i86xpv guest domain or VM can't update the microcode.
211 #define XPVDOMU_OR_HVM \
212 ((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)
214 /*ARGSUSED*/
215 static int
216 ucode_capable_amd(cpu_t *cp)
218 int hwenv = get_hwenv();
220 if (XPVDOMU_OR_HVM)
221 return (0);
223 return (cpuid_getfamily(cp) >= 0x10);
226 static int
227 ucode_capable_intel(cpu_t *cp)
229 int hwenv = get_hwenv();
231 if (XPVDOMU_OR_HVM)
232 return (0);
234 return (cpuid_getfamily(cp) >= 6);
238 * Called when it is no longer necessary to keep the microcode around,
239 * or when the cached microcode doesn't match the CPU being processed.
241 static void
242 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
244 ucode_file_amd_t *ucodefp = ufp->amd;
246 if (ucodefp == NULL)
247 return;
249 ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
250 ufp->amd = NULL;
253 static void
254 ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
256 ucode_file_intel_t *ucodefp = &ufp->intel;
257 int total_size, body_size;
259 if (ucodefp == NULL || ucodefp->uf_header == NULL)
260 return;
262 total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
263 body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
264 if (ucodefp->uf_body) {
265 ucode_free(id, ucodefp->uf_body, body_size);
266 ucodefp->uf_body = NULL;
269 if (ucodefp->uf_ext_table) {
270 int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;
272 ucode_free(id, ucodefp->uf_ext_table, size);
273 ucodefp->uf_ext_table = NULL;
276 ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
277 ucodefp->uf_header = NULL;
/*
 * Find the equivalent CPU id in the AMD equivalence table for the signature
 * of cp, storing it in *eq_sig.  Returns EM_OK on success; EM_OPENFILE /
 * EM_NOMEM / EM_FILESIZE on I/O or allocation failure; EM_HIGHERREV when no
 * entry matches (assumed to mean an outdated microcode file).
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu: read the table one
	 * entry at a time into a single BOP_ALLOC'ed record, stopping at a
	 * match or at the zero terminator entry.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				/* short read: ran off the end of the table */
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the whole equivalence table and cache it
	 * in ucode_eqtbl_amd.  Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id (table scan for non-boot CPUs). */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}
/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef	__xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip to first microcode patch (magic + equivalence table) */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	/* walk each (type, length, patch) triple in the container */
	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			/* 0xffff can never match an equivalent CPU id */
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
/*
 * Populate the ucode file structure from microcode file corresponding to
 * this CPU, if exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */
	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special. To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}
/*
 * Locate and read in the Intel microcode file matching this CPU's signature
 * and platform id, verifying header, body and extended-signature-table
 * checksums along the way.  Returns EM_OK when a usable match is cached in
 * ufp, or the corresponding error code.
 */
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char		name[MAXPATHLEN];
	intptr_t	fd;
	int		count;
	int		header_size = UCODE_HEADER_SIZE_INTEL;
	int		cpi_sig = cpuid_getsig(cp);
	ucode_errno_t	rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t	*uhp = ucodefp->uf_header;
		uint32_t	offset = header_size;
		int		total_size, body_size, ext_size;
		uint32_t	sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/* header + body must checksum to zero together */
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			/* each extended signature also checksums on its own */
			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
#ifndef	__xpv
/*
 * Decide whether the AMD patch in ucodefp applies to a CPU with equivalent
 * id eq_sig and current revision in uinfop.  Returns EM_OK on a usable
 * match, EM_HIGHERREV when the patch is not newer than what is running, and
 * EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/*
	 * Don't even think about loading patches that would require code
	 * execution. Does not apply to patches for family 0x14 and beyond.
	 */
	if (uh->uh_cpu_rev < 0x5000 &&
	    size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	/* chipset-specific patches cannot be validated here; skip them */
	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif
/*
 * Check the primary signature and, if present, the extended signature table
 * for a match against this CPU's signature and platform id.  Returns EM_OK
 * on a usable match, EM_HIGHERREV when the file is not newer than the
 * running revision, EM_NOMATCH otherwise.  (Despite the historical comment,
 * this returns a ucode_errno_t, not 1/0.)
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}
/*
 * Cross-call handler: apply the microcode described by arg1 (a
 * ucode_update_t *) on the current CPU and record the resulting revision in
 * both the machcpu state and uusp->new_rev.  Always returns 0, as required
 * by the xc_sync() handler contract.
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
#ifndef	__xpv
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef	__xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* on_trap() guards against a #GP/#PF from the WRMSR */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		/*
		 * On some platforms a cache invalidation is required for the
		 * ucode update to be successful due to the parts of the
		 * processor that the microcode is updating.
		 */
		invalidate_cache();
		wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
#endif
	/* re-read so new_rev reflects whatever is actually running now */
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
/*
 * Load the cached AMD microcode patch on this CPU.  Returns the patch level
 * we expect to be running afterwards, or 0 if the MSR write trapped.
 */
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef	__xpv
	ucode_update_t uus;
#else
	on_trap_data_t otd;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef	__xpv
	kpreempt_disable();
	/* a fault during the WRMSR means the patch did not take */
	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		kpreempt_enable();
		return (0);
	}
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	no_trap();
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	/* hand the whole container to the hypervisor and re-read the rev */
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}
/*
 * Load the cached Intel microcode on this CPU and return the revision
 * carried in the file header.
 */
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef	__xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef	__xpv
	/*
	 * the hypervisor wants the header, data, and extended
	 * signature tables. We can only get here from the boot
	 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT(ext_size >= 0);

	/* reassemble header + body (+ ext table) into one flat buffer */
	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	kpreempt_disable();
	/*
	 * On some platforms a cache invalidation is required for the
	 * ucode update to be successful due to the parts of the
	 * processor that the microcode is updating.
	 */
	invalidate_cache();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}
#ifdef	__xpv
/*
 * Hand the microcode buffer to the Xen hypervisor, which performs the
 * update on all CPUs.  Only callable from the control domain.
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif	/* __xpv */
/*
 * Record the running AMD microcode patch level from MSR_AMD_PATCHLEVEL.
 */
static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}
/*
 * Record the running Intel microcode revision from MSR_INTC_UCODE_REV.
 */
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
/*
 * Walk an AMD microcode container image looking for the first patch that
 * matches the CPU described in uusp, recording the patch pointer, size and
 * expected revision on success.  Returns EM_OK, EM_HIGHERREV (a lower-rev
 * match was seen) or EM_NOMATCH.
 */
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef	__xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	count = *ptr++; size -= 4;
	/* scan the equivalence table for this CPU's signature */
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		/* advance past the previous section (count bytes) */
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		/* each patch section: type word, length word, payload */
		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance. This is especially true on
	 * mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}
/*
 * Scan a concatenated Intel microcode image for the newest update matching
 * the CPU described in uusp, recording pointer, size and expected revision.
 * Returns EM_OK on success, otherwise the best search result seen.
 */
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t	header_size = UCODE_HEADER_SIZE_INTEL;
	int		remaining;
	int		found = 0;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int	total_size, body_size, ext_size;
		uint8_t	*curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef	__xpv
			/* bare metal wants just the body for the WRMSR */
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			/* the hypervisor wants the full update record */
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int		found = 0;
	processorid_t	id;
	ucode_update_t	cached = { 0 };
	ucode_update_t	*cachedp = NULL;
	ucode_errno_t	rc = EM_OK;
	ucode_errno_t	search_rc = EM_NOMATCH; /* search result */
	cpuset_t	cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	/* cpu_lock keeps the CPU list stable across the whole pass */
	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef	__xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		/* run ucode_write() on the target CPU, one CPU at a time */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	/* no CPU ever matched: report the best search result instead */
	if (!found)
		rc = search_rc;

	return (rc);
}
/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			/* unsupported vendor: leave ucode NULL and bail */
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef	__xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * do do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}
1243 * Returns microcode revision from the machcpu structure.
1245 ucode_errno_t
1246 ucode_get_rev(uint32_t *revp)
1248 int i;
1250 ASSERT(ucode);
1251 ASSERT(revp);
1253 if (!ucode->capable(CPU))
1254 return (EM_NOTSUP);
1256 mutex_enter(&cpu_lock);
1257 for (i = 0; i < max_ncpus; i++) {
1258 cpu_t *cpu;
1260 if ((cpu = cpu_get(i)) == NULL)
1261 continue;
1263 revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1265 mutex_exit(&cpu_lock);
1267 return (EM_OK);